From 774af31d2477fe38d61870c5b4a6066a21dff9ca Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Fri, 14 May 2021 13:55:11 +0200 Subject: [PATCH 01/46] Revert 'Revert "add --node-label flag for node create command (#584, @developer-guy, @ejose, @dentrax)"' This reverts commit 70872648b345af93380653a357cc5c215614fb7c. --- cmd/node/nodeCreate.go | 23 +++++++++++++++++++++-- pkg/client/node.go | 5 +++++ pkg/types/types.go | 39 ++++++++++++++++++++------------------- 3 files changed, 46 insertions(+), 21 deletions(-) diff --git a/cmd/node/nodeCreate.go b/cmd/node/nodeCreate.go index 52f255d5..cc7c2b10 100644 --- a/cmd/node/nodeCreate.go +++ b/cmd/node/nodeCreate.go @@ -23,6 +23,7 @@ package node import ( "fmt" + "strings" "time" "github.com/spf13/cobra" @@ -73,6 +74,8 @@ func NewCmdNodeCreate() *cobra.Command { cmd.Flags().BoolVar(&createNodeOpts.Wait, "wait", false, "Wait for the node(s) to be ready before returning.") cmd.Flags().DurationVar(&createNodeOpts.Timeout, "timeout", 0*time.Second, "Maximum waiting time for '--wait' before canceling/returning.") + cmd.Flags().StringSliceP("k3s-node-label", "", []string{}, "Specify k3s node labels in format \"foo=bar\"") + // done return cmd } @@ -124,6 +127,21 @@ func parseCreateNodeCmd(cmd *cobra.Command, args []string) ([]*k3d.Node, *k3d.Cl log.Errorf("Provided memory limit value is invalid") } + k3sNodeLabelsFlag, err := cmd.Flags().GetStringSlice("k3s-node-label") + if err != nil { + log.Errorln("No node-label specified") + log.Fatalln(err) + } + + k3sNodeLabels := make(map[string]string, len(k3sNodeLabelsFlag)) + for _, label := range k3sNodeLabelsFlag { + labelSplitted := strings.Split(label, "=") + if len(labelSplitted) != 2 { + log.Fatalf("unknown label format format: %s, use format \"foo=bar\"", label) + } + k3sNodeLabels[labelSplitted[0]] = labelSplitted[1] + } + // generate list of nodes nodes := []*k3d.Node{} for i := 0; i < replicas; i++ { @@ -134,8 +152,9 @@ func parseCreateNodeCmd(cmd *cobra.Command, args 
[]string) ([]*k3d.Node, *k3d.Cl Labels: map[string]string{ k3d.LabelRole: roleStr, }, - Restart: true, - Memory: memory, + K3sNodeLabels: k3sNodeLabels, + Restart: true, + Memory: memory, } nodes = append(nodes, node) } diff --git a/pkg/client/node.go b/pkg/client/node.go index c59dbc7b..ebbf28f0 100644 --- a/pkg/client/node.go +++ b/pkg/client/node.go @@ -492,6 +492,11 @@ func patchAgentSpec(node *k3d.Node) error { if node.Cmd == nil { node.Cmd = []string{"agent"} } + + for k, v := range node.K3sNodeLabels { + node.Args = append(node.Args, "--node-label", fmt.Sprintf("%s=%s", k, v)) + } + return nil } diff --git a/pkg/types/types.go b/pkg/types/types.go index 832146e9..655047ad 100644 --- a/pkg/types/types.go +++ b/pkg/types/types.go @@ -330,25 +330,26 @@ type NodeIP struct { // Node describes a k3d node type Node struct { - Name string `yaml:"name" json:"name,omitempty"` - Role Role `yaml:"role" json:"role,omitempty"` - Image string `yaml:"image" json:"image,omitempty"` - Volumes []string `yaml:"volumes" json:"volumes,omitempty"` - Env []string `yaml:"env" json:"env,omitempty"` - Cmd []string // filled automatically based on role - Args []string `yaml:"extraArgs" json:"extraArgs,omitempty"` - Ports nat.PortMap `yaml:"portMappings" json:"portMappings,omitempty"` - Restart bool `yaml:"restart" json:"restart,omitempty"` - Created string `yaml:"created" json:"created,omitempty"` - Labels map[string]string // filled automatically - Networks []string // filled automatically - ExtraHosts []string // filled automatically - ServerOpts ServerOpts `yaml:"serverOpts" json:"serverOpts,omitempty"` - AgentOpts AgentOpts `yaml:"agentOpts" json:"agentOpts,omitempty"` - GPURequest string // filled automatically - Memory string // filled automatically - State NodeState // filled automatically - IP NodeIP // filled automatically + Name string `yaml:"name" json:"name,omitempty"` + Role Role `yaml:"role" json:"role,omitempty"` + Image string `yaml:"image" json:"image,omitempty"` + 
Volumes []string `yaml:"volumes" json:"volumes,omitempty"` + Env []string `yaml:"env" json:"env,omitempty"` + Cmd []string // filled automatically based on role + Args []string `yaml:"extraArgs" json:"extraArgs,omitempty"` + Ports nat.PortMap `yaml:"portMappings" json:"portMappings,omitempty"` + Restart bool `yaml:"restart" json:"restart,omitempty"` + Created string `yaml:"created" json:"created,omitempty"` + Labels map[string]string // filled automatically + K3sNodeLabels map[string]string `yaml:"k3sNodeLabels" json:"k3sNodeLabels,omitempty"` + Networks []string // filled automatically + ExtraHosts []string // filled automatically + ServerOpts ServerOpts `yaml:"serverOpts" json:"serverOpts,omitempty"` + AgentOpts AgentOpts `yaml:"agentOpts" json:"agentOpts,omitempty"` + GPURequest string // filled automatically + Memory string // filled automatically + State NodeState // filled automatically + IP NodeIP // filled automatically } // ServerOpts describes some additional server role specific opts From 99fe3c75c46cbf67bdae220accfb200c05bbe1f4 Mon Sep 17 00:00:00 2001 From: Thorsten Klein Date: Fri, 14 May 2021 14:01:25 +0200 Subject: [PATCH 02/46] [BREAKING] Config File Enhancements: v1alpha3, migrations, generic k3s-args (#605) Excerpt: - new version v1alpha3 with k3s extraArgs using node filters - reflected in CLI via --k3s-arg - new migration option to migrate (internally and via cli) from v1alpha2 to v1alpha3 - enhancements to how config files are being read - e2e tests for config file migration --- cmd/cluster/clusterCreate.go | 86 ++++-- cmd/config/config.go | 1 + cmd/config/configInit.go | 2 +- cmd/config/configMigrate.go | 111 ++++++++ docgen/README.md | 2 + docgen/go.mod | 4 - docgen/go.sum | 57 +--- docs/usage/commands/k3d_cluster_create.md | 8 +- docs/usage/commands/k3d_config.md | 1 + .../{k3d_docgen.md => k3d_config_migrate.md} | 10 +- docs/usage/commands/k3d_node_create.md | 17 +- docs/usage/configfile.md | 15 +- docs/usage/guides/registries.md | 2 +- 
pkg/client/cluster.go | 2 +- pkg/config/config.go | 67 +++-- pkg/config/config_test.go | 27 +- pkg/config/jsonschema_test.go | 6 +- pkg/config/merge.go | 2 +- pkg/config/merge_test.go | 9 +- pkg/config/migrate.go | 40 +++ pkg/config/process.go | 2 +- pkg/config/process_test.go | 5 +- .../test_assets/config_test_cluster.yaml | 2 +- .../test_assets/config_test_cluster_list.yaml | 2 +- .../test_assets/config_test_simple.yaml | 13 +- .../test_assets/config_test_simple_2.yaml | 2 +- .../config_test_simple_invalid_servers.yaml | 9 +- .../test_assets/config_test_unknown.yaml | 2 +- pkg/config/transform.go | 22 +- pkg/config/transform_test.go | 5 +- pkg/config/types/types.go | 34 +++ pkg/config/v1alpha2/types.go | 99 ++++--- pkg/config/v1alpha3/migrations.go | 84 ++++++ pkg/config/v1alpha3/schema.json | 254 ++++++++++++++++++ pkg/config/v1alpha3/types.go | 203 ++++++++++++++ pkg/config/validate.go | 2 +- pkg/config/validate_test.go | 2 +- pkg/types/types.go | 2 - tests/assets/config_test_simple.yaml | 9 +- ...config_test_simple_migration_v1alpha2.yaml | 51 ++++ ...config_test_simple_migration_v1alpha3.yaml | 52 ++++ tests/test_config_file_migration.sh | 27 ++ 42 files changed, 1141 insertions(+), 211 deletions(-) create mode 100644 cmd/config/configMigrate.go rename docs/usage/commands/{k3d_docgen.md => k3d_config_migrate.md} (63%) create mode 100644 pkg/config/migrate.go create mode 100644 pkg/config/types/types.go create mode 100644 pkg/config/v1alpha3/migrations.go create mode 100644 pkg/config/v1alpha3/schema.json create mode 100644 pkg/config/v1alpha3/types.go create mode 100755 tests/assets/config_test_simple_migration_v1alpha2.yaml create mode 100755 tests/assets/config_test_simple_migration_v1alpha3.yaml create mode 100755 tests/test_config_file_migration.sh diff --git a/cmd/cluster/clusterCreate.go b/cmd/cluster/clusterCreate.go index f5fc4af5..574db83d 100644 --- a/cmd/cluster/clusterCreate.go +++ b/cmd/cluster/clusterCreate.go @@ -38,7 +38,7 @@ import ( cliutil 
"github.com/rancher/k3d/v4/cmd/util" k3dCluster "github.com/rancher/k3d/v4/pkg/client" "github.com/rancher/k3d/v4/pkg/config" - conf "github.com/rancher/k3d/v4/pkg/config/v1alpha2" + conf "github.com/rancher/k3d/v4/pkg/config/v1alpha3" "github.com/rancher/k3d/v4/pkg/runtimes" k3d "github.com/rancher/k3d/v4/pkg/types" "github.com/rancher/k3d/v4/version" @@ -77,11 +77,6 @@ func initConfig() { if _, err := os.Stat(configFile); err != nil { log.Fatalf("Failed to stat config file %s: %+v", configFile, err) } - log.Tracef("Schema: %+v", conf.JSONSchema) - - if err := config.ValidateSchemaFile(configFile, []byte(conf.JSONSchema)); err != nil { - log.Fatalf("Schema Validation failed for config file %s: %+v", configFile, err) - } // try to read config into memory (viper map structure) if err := cfgViper.ReadInConfig(); err != nil { @@ -92,7 +87,16 @@ func initConfig() { log.Fatalf("Failed to read config file %s: %+v", configFile, err) } - log.Infof("Using config file %s", cfgViper.ConfigFileUsed()) + schema, err := config.GetSchemaByVersion(cfgViper.GetString("apiVersion")) + if err != nil { + log.Fatalf("Cannot validate config file %s: %+v", configFile, err) + } + + if err := config.ValidateSchemaFile(configFile, schema); err != nil { + log.Fatalf("Schema Validation failed for config file %s: %+v", configFile, err) + } + + log.Infof("Using config file %s (%s#%s)", cfgViper.ConfigFileUsed(), strings.ToLower(cfgViper.GetString("apiVersion")), strings.ToLower(cfgViper.GetString("kind"))) } if log.GetLevel() >= log.DebugLevel { c, _ := yaml.Marshal(cfgViper.AllSettings()) @@ -121,19 +125,35 @@ func NewCmdClusterCreate() *cobra.Command { /************************* * Compute Configuration * *************************/ - cfg, err := config.FromViperSimple(cfgViper) + if cfgViper.GetString("apiversion") == "" { + cfgViper.Set("apiversion", config.DefaultConfigApiVersion) + } + if cfgViper.GetString("kind") == "" { + cfgViper.Set("kind", "Simple") + } + cfg, err := 
config.FromViper(cfgViper) if err != nil { log.Fatalln(err) } - log.Debugf("========== Simple Config ==========\n%+v\n==========================\n", cfg) + if cfg.GetAPIVersion() != config.DefaultConfigApiVersion { + log.Warnf("Default config apiVersion is '%s', but you're using '%s': consider migrating.", config.DefaultConfigApiVersion, cfg.GetAPIVersion()) + cfg, err = config.Migrate(cfg, config.DefaultConfigApiVersion) + if err != nil { + log.Fatalln(err) + } + } + + simpleCfg := cfg.(conf.SimpleConfig) + + log.Debugf("========== Simple Config ==========\n%+v\n==========================\n", simpleCfg) - cfg, err = applyCLIOverrides(cfg) + simpleCfg, err = applyCLIOverrides(simpleCfg) if err != nil { log.Fatalf("Failed to apply CLI overrides: %+v", err) } - log.Debugf("========== Merged Simple Config ==========\n%+v\n==========================\n", cfg) + log.Debugf("========== Merged Simple Config ==========\n%+v\n==========================\n", simpleCfg) /************************************** * Transform, Process & Validate Configuration * @@ -141,10 +161,10 @@ func NewCmdClusterCreate() *cobra.Command { // Set the name if len(args) != 0 { - cfg.Name = args[0] + simpleCfg.Name = args[0] } - clusterConfig, err := config.TransformSimpleToClusterConfig(cmd.Context(), runtimes.SelectedRuntime, cfg) + clusterConfig, err := config.TransformSimpleToClusterConfig(cmd.Context(), runtimes.SelectedRuntime, simpleCfg) if err != nil { log.Fatalln(err) } @@ -178,7 +198,7 @@ func NewCmdClusterCreate() *cobra.Command { if err := k3dCluster.ClusterRun(cmd.Context(), runtimes.SelectedRuntime, clusterConfig); err != nil { // rollback if creation failed log.Errorln(err) - if cfg.Options.K3dOptions.NoRollback { // TODO: move rollback mechanics to pkg/ + if simpleCfg.Options.K3dOptions.NoRollback { // TODO: move rollback mechanics to pkg/ log.Fatalln("Cluster creation FAILED, rollback deactivated.") } // rollback if creation failed @@ -202,7 +222,7 @@ func NewCmdClusterCreate() 
*cobra.Command { if clusterConfig.KubeconfigOpts.UpdateDefaultKubeconfig { log.Debugf("Updating default kubeconfig with a new context for cluster %s", clusterConfig.Cluster.Name) - if _, err := k3dCluster.KubeconfigGetWrite(cmd.Context(), runtimes.SelectedRuntime, &clusterConfig.Cluster, "", &k3dCluster.WriteKubeConfigOptions{UpdateExisting: true, OverwriteExisting: false, UpdateCurrentContext: cfg.Options.KubeconfigOptions.SwitchCurrentContext}); err != nil { + if _, err := k3dCluster.KubeconfigGetWrite(cmd.Context(), runtimes.SelectedRuntime, &clusterConfig.Cluster, "", &k3dCluster.WriteKubeConfigOptions{UpdateExisting: true, OverwriteExisting: false, UpdateCurrentContext: simpleCfg.Options.KubeconfigOptions.SwitchCurrentContext}); err != nil { log.Warningln(err) } } @@ -266,6 +286,10 @@ func NewCmdClusterCreate() *cobra.Command { cmd.Flags().StringArrayP("label", "l", nil, "Add label to node container (Format: `KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 -l \"my.label@agent[0,1]\" -l \"other.label=somevalue@server[0]\"`") _ = ppViper.BindPFlag("cli.labels", cmd.Flags().Lookup("label")) + /* k3s */ + cmd.Flags().StringArray("k3s-arg", nil, "Additional args passed to k3s command (Format: `ARG@NODEFILTER[;@NODEFILTER]`)\n - Example: `k3d cluster create --k3s-arg \"--disable=traefik@server[0]\"") + _ = cfgViper.BindPFlag("cli.k3sargs", cmd.Flags().Lookup("k3s-arg")) + /****************** * "Normal" Flags * ****************** @@ -340,13 +364,6 @@ func NewCmdClusterCreate() *cobra.Command { cmd.Flags().String("registry-config", "", "Specify path to an extra registries.yaml file") _ = cfgViper.BindPFlag("registries.config", cmd.Flags().Lookup("registry-config")) - /* k3s */ - cmd.Flags().StringArray("k3s-server-arg", nil, "Additional args passed to the `k3s server` command on server nodes (new flag per arg)") - _ = cfgViper.BindPFlag("options.k3s.extraserverargs", cmd.Flags().Lookup("k3s-server-arg")) - - 
cmd.Flags().StringArray("k3s-agent-arg", nil, "Additional args passed to the `k3s agent` command on agent nodes (new flag per arg)") - _ = cfgViper.BindPFlag("options.k3s.extraagentargs", cmd.Flags().Lookup("k3s-agent-arg")) - /* Subcommands */ // done @@ -520,5 +537,30 @@ func applyCLIOverrides(cfg conf.SimpleConfig) (conf.SimpleConfig, error) { log.Tracef("EnvFilterMap: %+v", envFilterMap) + // --k3s-arg + argFilterMap := make(map[string][]string, 1) + for _, argFlag := range ppViper.GetStringSlice("cli.k3sargs") { + + // split node filter from the specified arg + arg, filters, err := cliutil.SplitFiltersFromFlag(argFlag) + if err != nil { + log.Fatalln(err) + } + + // create new entry or append filter to existing entry + if _, exists := argFilterMap[arg]; exists { + argFilterMap[arg] = append(argFilterMap[arg], filters...) + } else { + argFilterMap[arg] = filters + } + } + + for arg, nodeFilters := range argFilterMap { + cfg.Options.K3sOptions.ExtraArgs = append(cfg.Options.K3sOptions.ExtraArgs, conf.K3sArgWithNodeFilters{ + Arg: arg, + NodeFilters: nodeFilters, + }) + } + return cfg, nil } diff --git a/cmd/config/config.go b/cmd/config/config.go index 2a49986a..7156c576 100644 --- a/cmd/config/config.go +++ b/cmd/config/config.go @@ -41,6 +41,7 @@ func NewCmdConfig() *cobra.Command { } cmd.AddCommand(NewCmdConfigInit()) + cmd.AddCommand(NewCmdConfigMigrate()) return cmd } diff --git a/cmd/config/configInit.go b/cmd/config/configInit.go index 34b27b91..b85b754b 100644 --- a/cmd/config/configInit.go +++ b/cmd/config/configInit.go @@ -25,7 +25,7 @@ import ( "fmt" "os" - config "github.com/rancher/k3d/v4/pkg/config/v1alpha2" + config "github.com/rancher/k3d/v4/pkg/config/v1alpha3" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" ) diff --git a/cmd/config/configMigrate.go b/cmd/config/configMigrate.go new file mode 100644 index 00000000..e18b19bc --- /dev/null +++ b/cmd/config/configMigrate.go @@ -0,0 +1,111 @@ +/* +Copyright © 2020 The k3d Author(s) + 
+Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+*/ +package config + +import ( + "os" + "strings" + + "github.com/rancher/k3d/v4/pkg/config" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "github.com/spf13/viper" + "gopkg.in/yaml.v2" +) + +// NewCmdConfigMigrate returns a new cobra command +func NewCmdConfigMigrate() *cobra.Command { + + cmd := &cobra.Command{ + Use: "migrate INPUT OUTPUT", + Args: cobra.RangeArgs(1, 2), + Run: func(cmd *cobra.Command, args []string) { + + configFile := args[0] + + if _, err := os.Stat(configFile); err != nil { + log.Fatalf("Failed to stat config file %s: %+v", configFile, err) + } + + cfgViper := viper.New() + cfgViper.SetConfigType("yaml") + + cfgViper.SetConfigFile(configFile) + + // try to read config into memory (viper map structure) + if err := cfgViper.ReadInConfig(); err != nil { + if _, ok := err.(viper.ConfigFileNotFoundError); ok { + log.Fatalf("Config file %s not found: %+v", configFile, err) + } + // config file found but some other error happened + log.Fatalf("Failed to read config file %s: %+v", configFile, err) + } + + schema, err := config.GetSchemaByVersion(cfgViper.GetString("apiVersion")) + if err != nil { + log.Fatalf("Cannot validate config file %s: %+v", configFile, err) + } + + if err := config.ValidateSchemaFile(configFile, schema); err != nil { + log.Fatalf("Schema Validation failed for config file %s: %+v", configFile, err) + } + + log.Infof("Using config file %s (%s#%s)", cfgViper.ConfigFileUsed(), strings.ToLower(cfgViper.GetString("apiVersion")), strings.ToLower(cfgViper.GetString("kind"))) + + cfg, err := config.FromViper(cfgViper) + if err != nil { + log.Fatalln(err) + } + + if cfg.GetAPIVersion() != config.DefaultConfigApiVersion { + cfg, err = config.Migrate(cfg, config.DefaultConfigApiVersion) + if err != nil { + log.Fatalln(err) + } + } + + yamlout, err := yaml.Marshal(cfg) + if err != nil { + log.Fatalln(err) + } + + output := "-" + + if len(args) > 1 { + output = args[1] + } + + if output == "-" { + if _, err := 
os.Stdout.Write(yamlout); err != nil { + log.Fatalln(err) + } + } else { + if err := os.WriteFile(output, yamlout, os.ModeAppend); err != nil { + log.Fatalln(err) + } + } + + }, + } + + return cmd +} diff --git a/docgen/README.md b/docgen/README.md index b65264ef..26f1be62 100644 --- a/docgen/README.md +++ b/docgen/README.md @@ -6,6 +6,8 @@ The code will output files in [`../docs/usage/commands/`](../docs/usage/commands ## Run +- may required a `replace github.com/rancher/k3d/v4 => PATH/TO/LOCAL/REPO` in the `go.mod` + ```bash # ensure that you're in the docgen dir, as the relative path to the docs/ dir is hardcoded cd docgen diff --git a/docgen/go.mod b/docgen/go.mod index 8b20fcae..dc3a123f 100644 --- a/docgen/go.mod +++ b/docgen/go.mod @@ -7,11 +7,8 @@ require ( github.com/containerd/cgroups v0.0.0-20210414074453-680c246289fb // indirect github.com/containerd/containerd v1.5.0-rc.1 // indirect github.com/containerd/continuity v0.0.0-20210315143101-93e15499afd5 // indirect - github.com/docker/cli v20.10.6+incompatible // indirect - github.com/docker/docker v20.10.6+incompatible // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/imdario/mergo v0.3.12 // indirect github.com/magiconair/properties v1.8.5 // indirect github.com/mitchellh/mapstructure v1.4.1 // indirect github.com/pelletier/go-toml v1.9.0 // indirect @@ -26,7 +23,6 @@ require ( google.golang.org/genproto v0.0.0-20210413151531-c14fb6ef47c3 // indirect google.golang.org/grpc v1.37.0 // indirect gopkg.in/ini.v1 v1.62.0 // indirect - k8s.io/client-go v0.21.0 // indirect k8s.io/utils v0.0.0-20210305010621-2afb4311ab10 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.1.1 // indirect ) diff --git a/docgen/go.sum b/docgen/go.sum index 4cb02b63..93a5ce4e 100644 --- a/docgen/go.sum +++ b/docgen/go.sum @@ -39,7 +39,6 @@ github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX 
github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= -github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= @@ -212,7 +211,6 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:ma github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= @@ -227,17 +225,15 @@ github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/docker/cli v20.10.5+incompatible h1:bjflayQbWg+xOkF2WPEAOi4Y7zWhR7ptoPhV/VqLVDE= 
github.com/docker/cli v20.10.5+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/cli v20.10.6+incompatible h1:LAyI6Lnwv+AUjtp2ZyN1lxqXBtkeFUqm4H7CZMWZuP8= -github.com/docker/cli v20.10.6+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v0.0.0-20171011171712-7484e51bf6af/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v20.10.5+incompatible h1:o5WL5onN4awYGwrW7+oTn5x9AF2prw7V0Ox8ZEkoCdg= github.com/docker/docker v20.10.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.6+incompatible h1:oXI3Vas8TI8Eu/EjH4srKHJBVqraSzJybhxY7Om9faQ= -github.com/docker/docker v20.10.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= @@ -253,7 +249,6 @@ github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZ github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod 
h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dvyukov/go-fuzz v0.0.0-20201127111758-49e582c6c23d/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= @@ -286,9 +281,8 @@ github.com/go-lintpack/lintpack v0.5.2/go.mod h1:NwZuYi2nUHho8XEIZ6SIxihrnPoqBTD github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc= -github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= @@ -458,9 +452,8 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1: github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA= github.com/imdario/mergo v0.3.11/go.mod 
h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= -github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= @@ -494,12 +487,12 @@ github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= @@ -537,7 +530,6 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh 
github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= -github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/sys/mount v0.2.0 h1:WhCW5B355jtxndN5ovugJlMFJawbUODuW8fSnEH6SSM= github.com/moby/sys/mount v0.2.0/go.mod h1:aAivFE2LB3W4bACsUXChRHQ0qKWsetY4Y9V7sxOougM= github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= @@ -564,8 +556,6 @@ github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+ github.com/nbutton23/zxcvbn-go v0.0.0-20160627004424-a22cb81b2ecd/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= github.com/nbutton23/zxcvbn-go v0.0.0-20171102151520-eafdab6b0663/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= @@ -660,8 +650,6 @@ github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= 
-github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= @@ -795,11 +783,6 @@ go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go4.org/intern v0.0.0-20210108033219-3eb7198706b2 h1:VFTf+jjIgsldaz/Mr00VaCSswHJrI2hIjQygE/W4IMg= -go4.org/intern v0.0.0-20210108033219-3eb7198706b2/go.mod h1:vLqJ+12kCw61iCWsPto0EOHhBS+o4rO5VIucbc9g2Cc= -go4.org/unsafe/assume-no-moving-gc v0.0.0-20201222175341-b30ae309168e/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= -go4.org/unsafe/assume-no-moving-gc v0.0.0-20201222180813-1025295fd063 h1:1tk03FUNpulq2cuWpXZWj649rwJpk0d20rxWiopKRmc= -go4.org/unsafe/assume-no-moving-gc v0.0.0-20201222180813-1025295fd063/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -815,7 +798,7 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 h1:It14KIkyBFYkHkwZ7k45minvA9aorojkyjGk9KJ5B/w= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -892,7 +875,6 @@ golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1 h1:4qWs8cYYH6PoEFy4dfhDFgoMGkwAcETd+MmPdCPMzUc= @@ -985,7 +967,6 @@ golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -993,9 +974,7 @@ golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210414055047-fe65e336abe0 h1:g9s1Ppvvun/fI+BptTMj909BBIcGrzQ32k9FNlcevOE= golang.org/x/sys v0.0.0-20210414055047-fe65e336abe0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210406210042-72f3dc4e9b72 h1:VqE9gduFZ4dbR7XoL77lHFp0/DyDUBKSXK7CMFkVcV0= golang.org/x/term v0.0.0-20210406210042-72f3dc4e9b72/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1012,9 +991,8 @@ golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE= -golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20170915040203-e531a2a1c15f/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1176,9 +1154,8 @@ gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLks gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= @@ -1218,22 +1195,17 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh 
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -inet.af/netaddr v0.0.0-20210403172118-1e1430f727e0 h1:ANl7piXB3SHmhwTNeTO0yl0yf4gO3/aaFjcBCdH9Ftg= -inet.af/netaddr v0.0.0-20210403172118-1e1430f727e0/go.mod h1:I2i9ONCXRZDnG1+7O8fSuYzjcPxHQXrIfzD/IkR87x4= k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= +k8s.io/api v0.20.4 h1:xZjKidCirayzX6tHONRQyTNDVIR55TYVqgATqo6ZULY= k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= -k8s.io/api v0.21.0 h1:gu5iGF4V6tfVCQ/R+8Hc0h7H1JuEhzyEi9S4R5LM8+Y= -k8s.io/api v0.21.0/go.mod h1:+YbrhBBGgsxbF6o6Kj4KJPJnBmAKuXDeS3E18bgHNVU= k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.4 h1:vhxQ0PPUUU2Ns1b9r4/UFp13UPs8cw2iOoTjnY9faa0= k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= -k8s.io/apimachinery v0.21.0 h1:3Fx+41if+IRavNcKOz09FwEXDBG6ORh6iMsTSelhkMA= -k8s.io/apimachinery v0.21.0/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= +k8s.io/client-go v0.20.4 h1:85crgh1IotNkLpKYKZHVNI1JT86nr/iDCvq2iWKsql4= k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= -k8s.io/client-go v0.21.0 h1:n0zzzJsAQmJngpC0IhgFcApZyoGXPrDIAD601HD09ag= -k8s.io/client-go v0.21.0/go.mod h1:nNBytTF9qPFDEhoqgEPaarobC8QPae13bElIVHzIglA= k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= k8s.io/cri-api v0.17.3/go.mod 
h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= @@ -1241,11 +1213,9 @@ k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.8.0 h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts= -k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= -k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210305010621-2afb4311ab10 h1:u5rPykqiCpL+LBfjRkXvnK71gOgIdmq3eHUEkPrbeTI= @@ -1258,7 +1228,6 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.1.1 h1:nYqY2A6oy37sKLYuSBXuQhbj4JVclzJK13BOIvJG5XU= sigs.k8s.io/structured-merge-diff/v4 v4.1.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git 
a/docs/usage/commands/k3d_cluster_create.md b/docs/usage/commands/k3d_cluster_create.md index ff4ea16b..f784462d 100644 --- a/docs/usage/commands/k3d_cluster_create.md +++ b/docs/usage/commands/k3d_cluster_create.md @@ -25,16 +25,16 @@ k3d cluster create NAME [flags] - Example: `k3d cluster create --servers 3 --api-port 0.0.0.0:6550` -c, --config string Path of a config file to use -e, --env KEY[=VALUE][@NODEFILTER[;NODEFILTER...]] Add environment variables to nodes (Format: KEY[=VALUE][@NODEFILTER[;NODEFILTER...]] - - Example: `k3d cluster create --agents 2 -e "HTTP_PROXY=my.proxy.com" -e "SOME_KEY=SOME_VAL@server[0]"` + - Example: `k3d cluster create --agents 2 -e "HTTP_PROXY=my.proxy.com@server[0]" -e "SOME_KEY=SOME_VAL@server[0]"` --gpus string GPU devices to add to the cluster node containers ('all' to pass all GPUs) [From docker] -h, --help help for create -i, --image string Specify k3s image that you want to use for the nodes - --k3s-agent-arg k3s agent Additional args passed to the k3s agent command on agent nodes (new flag per arg) - --k3s-server-arg k3s server Additional args passed to the k3s server command on server nodes (new flag per arg) + --k3s-arg ARG@NODEFILTER[;@NODEFILTER] Additional args passed to k3s command (Format: ARG@NODEFILTER[;@NODEFILTER]) + - Example: `k3d cluster create --k3s-arg "--disable=traefik@server[0]" --kubeconfig-switch-context Directly switch the default kubeconfig's current-context to the new cluster's context (requires --kubeconfig-update-default) (default true) --kubeconfig-update-default Directly update the default kubeconfig with the new cluster's context (default true) -l, --label KEY[=VALUE][@NODEFILTER[;NODEFILTER...]] Add label to node container (Format: KEY[=VALUE][@NODEFILTER[;NODEFILTER...]] - - Example: `k3d cluster create --agents 2 -l "my.label@agent[0,1]" -v "other.label=somevalue@server[0]"` + - Example: `k3d cluster create --agents 2 -l "my.label@agent[0,1]" -l "other.label=somevalue@server[0]"` --network 
string Join an existing network --no-hostip Disable the automatic injection of the Host IP as 'host.k3d.internal' into the containers and CoreDNS --no-image-volume Disable the creation of a volume for importing images diff --git a/docs/usage/commands/k3d_config.md b/docs/usage/commands/k3d_config.md index c42923e5..8f8f076e 100644 --- a/docs/usage/commands/k3d_config.md +++ b/docs/usage/commands/k3d_config.md @@ -28,4 +28,5 @@ k3d config [flags] * [k3d](k3d.md) - https://k3d.io/ -> Run k3s in Docker! * [k3d config init](k3d_config_init.md) - +* [k3d config migrate](k3d_config_migrate.md) - diff --git a/docs/usage/commands/k3d_docgen.md b/docs/usage/commands/k3d_config_migrate.md similarity index 63% rename from docs/usage/commands/k3d_docgen.md rename to docs/usage/commands/k3d_config_migrate.md index 4767bdb6..902687ad 100644 --- a/docs/usage/commands/k3d_docgen.md +++ b/docs/usage/commands/k3d_config_migrate.md @@ -1,15 +1,15 @@ -## k3d docgen +## k3d config migrate + -Generate command docs ``` -k3d docgen [flags] +k3d config migrate INPUT OUTPUT [flags] ``` ### Options ``` - -h, --help help for docgen + -h, --help help for migrate ``` ### Options inherited from parent commands @@ -22,5 +22,5 @@ k3d docgen [flags] ### SEE ALSO -* [k3d](k3d.md) - https://k3d.io/ -> Run k3s in Docker! +* [k3d config](k3d_config.md) - Work with config file(s) diff --git a/docs/usage/commands/k3d_node_create.md b/docs/usage/commands/k3d_node_create.md index 041bb5cb..808cbe13 100644 --- a/docs/usage/commands/k3d_node_create.md +++ b/docs/usage/commands/k3d_node_create.md @@ -13,14 +13,15 @@ k3d node create NAME [flags] ### Options ``` - -c, --cluster string Select the cluster that the node shall connect to. 
(default "k3s-default") - -h, --help help for create - -i, --image string Specify k3s image used for the node(s) (default "docker.io/rancher/k3s:v1.20.0-k3s2") - --memory string Memory limit imposed on the node [From docker] - --replicas int Number of replicas of this node specification. (default 1) - --role string Specify node role [server, agent] (default "agent") - --timeout duration Maximum waiting time for '--wait' before canceling/returning. - --wait Wait for the node(s) to be ready before returning. + -c, --cluster string Select the cluster that the node shall connect to. (default "k3s-default") + -h, --help help for create + -i, --image string Specify k3s image used for the node(s) (default "docker.io/rancher/k3s:v1.20.0-k3s2") + --k3s-node-label strings Specify k3s node labels in format "foo=bar" + --memory string Memory limit imposed on the node [From docker] + --replicas int Number of replicas of this node specification. (default 1) + --role string Specify node role [server, agent] (default "agent") + --timeout duration Maximum waiting time for '--wait' before canceling/returning. + --wait Wait for the node(s) to be ready before returning. 
``` ### Options inherited from parent commands diff --git a/docs/usage/configfile.md b/docs/usage/configfile.md index 051b4edb..97fa45c0 100644 --- a/docs/usage/configfile.md +++ b/docs/usage/configfile.md @@ -25,13 +25,13 @@ Using a config file is as easy as putting it in a well-known place in your file As of the time of writing this documentation, the config file only **requires** you to define two fields: -- `apiVersion` to match the version of the config file that you want to use (at this time it would be `apiVersion: k3d.io/v1alpha2`) +- `apiVersion` to match the version of the config file that you want to use (at this time it would be `apiVersion: k3d.io/v1alpha3`) - `kind` to define the kind of config file that you want to use (currently we only have the `Simple` config) So this would be the minimal config file, which configures absolutely nothing: ```yaml -apiVersion: k3d.io/v1alpha2 +apiVersion: k3d.io/v1alpha3 kind: Simple ``` @@ -43,7 +43,7 @@ Currently, the config file is still in an Alpha-State, meaning, that it is subje !!! info "Validation via JSON-Schema" k3d uses a [JSON-Schema](https://json-schema.org/) to describe the expected format and fields of the configuration file. This schema is also used to [validate](https://github.com/xeipuuv/gojsonschema#validation) a user-given config file. - This JSON-Schema can be found in the specific config version sub-directory in the repository (e.g. [here for `v1alpha2`](https://github.com/rancher/k3d/blob/main/pkg/config/v1alpha2/schema.json)) and could be used to lookup supported fields or by linters to validate the config file, e.g. in your code editor. + This JSON-Schema can be found in the specific config version sub-directory in the repository (e.g. [here for `v1alpha3`](https://github.com/rancher/k3d/blob/main/pkg/config/v1alpha3/schema.json)) and could be used to lookup supported fields or by linters to validate the config file, e.g. in your code editor. 
### All Options: Example @@ -51,7 +51,7 @@ Since the config options and the config file are changing quite a bit, it's hard ```yaml # k3d configuration file, saved as e.g. /home/me/myk3dcluster.yaml -apiVersion: k3d.io/v1alpha2 # this will change in the future as we make everything more stable +apiVersion: k3d.io/v1alpha3 # this will change in the future as we make everything more stable kind: Simple # internally, we also have a Cluster config, which is not yet available externally name: mycluster # name that you want to give to your cluster (will still be prefixed with `k3d-`) servers: 1 # same as `--servers 1` @@ -98,9 +98,10 @@ options: disableRollback: false # same as `--no-Rollback` disableHostIPInjection: false # same as `--no-hostip` k3s: # options passed on to K3s itself - extraServerArgs: # additional arguments passed to the `k3s server` command; same as `--k3s-server-arg` - - --tls-san=my.host.domain - extraAgentArgs: [] # addditional arguments passed to the `k3s agent` command; same as `--k3s-agent-arg` + extraArgs: # additional arguments passed to the `k3s server|agent` command; same as `--k3s-arg` + - arg: --tls-san=my.host.domain + nodeFilters: + - server[*] kubeconfig: updateDefaultKubeconfig: true # add new cluster to your default Kubeconfig; same as `--kubeconfig-update-default` (default: true) switchCurrentContext: true # also set current-context to the new cluster's context; same as `--kubeconfig-switch-context` (default: true) diff --git a/docs/usage/guides/registries.md b/docs/usage/guides/registries.md index 04411dc8..11a485ab 100644 --- a/docs/usage/guides/registries.md +++ b/docs/usage/guides/registries.md @@ -29,7 +29,7 @@ This file can also be used for providing additional information necessary for ac If you're using a `SimpleConfig` file to configure your k3d cluster, you may as well embed the registries.yaml in there directly: ```yaml -apiVersion: k3d.io/v1alpha2 +apiVersion: k3d.io/v1alpha3 kind: Simple name: test servers: 1 diff --git 
a/pkg/client/cluster.go b/pkg/client/cluster.go index ec6cf801..4aa29fcb 100644 --- a/pkg/client/cluster.go +++ b/pkg/client/cluster.go @@ -36,7 +36,7 @@ import ( "github.com/docker/go-connections/nat" "github.com/imdario/mergo" "github.com/rancher/k3d/v4/pkg/actions" - config "github.com/rancher/k3d/v4/pkg/config/v1alpha2" + config "github.com/rancher/k3d/v4/pkg/config/v1alpha3" k3drt "github.com/rancher/k3d/v4/pkg/runtimes" "github.com/rancher/k3d/v4/pkg/runtimes/docker" runtimeErr "github.com/rancher/k3d/v4/pkg/runtimes/errors" diff --git a/pkg/config/config.go b/pkg/config/config.go index 9cf51e23..efd5b267 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -29,43 +29,51 @@ import ( "github.com/spf13/viper" - conf "github.com/rancher/k3d/v4/pkg/config/v1alpha2" + "github.com/rancher/k3d/v4/pkg/config/v1alpha2" + "github.com/rancher/k3d/v4/pkg/config/v1alpha3" + defaultConfig "github.com/rancher/k3d/v4/pkg/config/v1alpha3" + + types "github.com/rancher/k3d/v4/pkg/config/types" ) -func FromViperSimple(config *viper.Viper) (conf.SimpleConfig, error) { +const DefaultConfigApiVersion = defaultConfig.ApiVersion - var cfg conf.SimpleConfig +var Schemas = map[string]string{ + v1alpha2.ApiVersion: v1alpha2.JSONSchema, + v1alpha3.ApiVersion: v1alpha3.JSONSchema, +} - // determine config kind - if config.GetString("kind") != "" && strings.ToLower(config.GetString("kind")) != "simple" { - return cfg, fmt.Errorf("Wrong `kind` '%s' != 'simple' in config file", config.GetString("kind")) +func GetSchemaByVersion(apiVersion string) ([]byte, error) { + schema, ok := Schemas[strings.ToLower(apiVersion)] + if !ok { + return nil, fmt.Errorf("unsupported apiVersion '%s'", apiVersion) } + return []byte(schema), nil +} - if err := config.Unmarshal(&cfg); err != nil { - log.Errorln("Failed to unmarshal File config") - - return cfg, err - } +func FromViper(config *viper.Viper) (types.Config, error) { - return cfg, nil -} + var cfg types.Config + var err error -func 
FromViper(config *viper.Viper) (conf.Config, error) { + apiVersion := strings.ToLower(config.GetString("apiversion")) + kind := strings.ToLower(config.GetString("kind")) - var cfg conf.Config + log.Tracef("Trying to read config apiVersion='%s', kind='%s'", apiVersion, kind) - // determine config kind - switch strings.ToLower(config.GetString("kind")) { - case "simple": - cfg = conf.SimpleConfig{} - case "cluster": - cfg = conf.ClusterConfig{} - case "clusterlist": - cfg = conf.ClusterListConfig{} + switch apiVersion { + case "k3d.io/v1alpha2": + cfg, err = v1alpha2.GetConfigByKind(kind) + case "k3d.io/v1alpha3": + cfg, err = v1alpha3.GetConfigByKind(kind) case "": - return nil, fmt.Errorf("Missing `kind` in config file") + cfg, err = defaultConfig.GetConfigByKind(kind) default: - return nil, fmt.Errorf("Unknown `kind` '%s' in config file", config.GetString("kind")) + return nil, fmt.Errorf("cannot read config with apiversion '%s'", config.GetString("apiversion")) + } + + if err != nil { + return nil, err } if err := config.Unmarshal(&cfg); err != nil { @@ -76,3 +84,12 @@ func FromViper(config *viper.Viper) (conf.Config, error) { return cfg, nil } + +func getMigrations(version string) map[string]func(types.Config) (types.Config, error) { + switch version { + case v1alpha3.ApiVersion: + return v1alpha3.Migrations + default: + return nil + } +} diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go index 9f6e05d7..eb15f152 100644 --- a/pkg/config/config_test.go +++ b/pkg/config/config_test.go @@ -26,7 +26,8 @@ import ( "time" "github.com/go-test/deep" - conf "github.com/rancher/k3d/v4/pkg/config/v1alpha2" + configtypes "github.com/rancher/k3d/v4/pkg/config/types" + conf "github.com/rancher/k3d/v4/pkg/config/v1alpha3" "github.com/spf13/viper" k3d "github.com/rancher/k3d/v4/pkg/types" @@ -39,8 +40,8 @@ func TestReadSimpleConfig(t *testing.T) { exposedAPI.HostPort = "6443" expectedConfig := conf.SimpleConfig{ - TypeMeta: conf.TypeMeta{ - APIVersion: 
"k3d.io/v1alpha2", + TypeMeta: configtypes.TypeMeta{ + APIVersion: "k3d.io/v1alpha3", Kind: "Simple", }, Name: "test", @@ -83,8 +84,12 @@ func TestReadSimpleConfig(t *testing.T) { DisableImageVolume: false, }, K3sOptions: conf.SimpleConfigOptionsK3s{ - ExtraServerArgs: []string{"--tls-san=127.0.0.1"}, - ExtraAgentArgs: []string{}, + ExtraArgs: []conf.K3sArgWithNodeFilters{ + { + Arg: "--tls-san=127.0.0.1", + NodeFilters: []string{"server[*]"}, + }, + }, }, KubeconfigOptions: conf.SimpleConfigOptionsKubeconfig{ UpdateDefaultKubeconfig: true, @@ -107,7 +112,7 @@ func TestReadSimpleConfig(t *testing.T) { t.Error(err) } - cfg, err := FromViperSimple(config) + cfg, err := FromViper(config) if err != nil { t.Error(err) } @@ -123,8 +128,8 @@ func TestReadSimpleConfig(t *testing.T) { func TestReadClusterConfig(t *testing.T) { expectedConfig := conf.ClusterConfig{ - TypeMeta: conf.TypeMeta{ - APIVersion: "k3d.io/v1alpha2", + TypeMeta: configtypes.TypeMeta{ + APIVersion: "k3d.io/v1alpha3", Kind: "Cluster", }, Cluster: k3d.Cluster{ @@ -168,8 +173,8 @@ func TestReadClusterConfig(t *testing.T) { func TestReadClusterListConfig(t *testing.T) { expectedConfig := conf.ClusterListConfig{ - TypeMeta: conf.TypeMeta{ - APIVersion: "k3d.io/v1alpha2", + TypeMeta: configtypes.TypeMeta{ + APIVersion: "k3d.io/v1alpha3", Kind: "ClusterList", }, Clusters: []k3d.Cluster{ @@ -237,7 +242,7 @@ func TestReadUnknownConfig(t *testing.T) { t.Error(err) } - _, err := FromViperSimple(config) + _, err := FromViper(config) if err == nil { t.Fail() } diff --git a/pkg/config/jsonschema_test.go b/pkg/config/jsonschema_test.go index 5ece4e79..f3ec75b6 100644 --- a/pkg/config/jsonschema_test.go +++ b/pkg/config/jsonschema_test.go @@ -24,14 +24,14 @@ package config import ( "testing" - "github.com/rancher/k3d/v4/pkg/config/v1alpha2" + "github.com/rancher/k3d/v4/pkg/config/v1alpha3" ) func TestValidateSchema(t *testing.T) { cfgPath := "./test_assets/config_test_simple.yaml" - if err := 
ValidateSchemaFile(cfgPath, []byte(v1alpha2.JSONSchema)); err != nil { + if err := ValidateSchemaFile(cfgPath, []byte(v1alpha3.JSONSchema)); err != nil { t.Errorf("Validation of config file %s against the default schema failed: %+v", cfgPath, err) } @@ -42,7 +42,7 @@ func TestValidateSchemaFail(t *testing.T) { cfgPath := "./test_assets/config_test_simple_invalid_servers.yaml" var err error - if err = ValidateSchemaFile(cfgPath, []byte(v1alpha2.JSONSchema)); err == nil { + if err = ValidateSchemaFile(cfgPath, []byte(v1alpha3.JSONSchema)); err == nil { t.Errorf("Validation of config file %s against the default schema passed where we expected a failure", cfgPath) } diff --git a/pkg/config/merge.go b/pkg/config/merge.go index 390269dc..c86c0eaa 100644 --- a/pkg/config/merge.go +++ b/pkg/config/merge.go @@ -24,7 +24,7 @@ package config import ( "github.com/imdario/mergo" - conf "github.com/rancher/k3d/v4/pkg/config/v1alpha2" + conf "github.com/rancher/k3d/v4/pkg/config/v1alpha3" log "github.com/sirupsen/logrus" ) diff --git a/pkg/config/merge_test.go b/pkg/config/merge_test.go index 349f7d4b..cfb73b72 100644 --- a/pkg/config/merge_test.go +++ b/pkg/config/merge_test.go @@ -25,7 +25,8 @@ package config import ( "testing" - conf "github.com/rancher/k3d/v4/pkg/config/v1alpha2" + configtypes "github.com/rancher/k3d/v4/pkg/config/types" + conf "github.com/rancher/k3d/v4/pkg/config/v1alpha3" "github.com/spf13/viper" "gotest.tools/assert" ) @@ -34,7 +35,7 @@ func TestMergeSimpleConfig(t *testing.T) { srcConfig := "./test_assets/config_test_simple.yaml" destConfig := "./test_assets/config_test_simple_2.yaml" - var src, dest conf.Config + var src, dest configtypes.Config var err error cfg1 := viper.New() @@ -45,11 +46,11 @@ func TestMergeSimpleConfig(t *testing.T) { cfg2.SetConfigFile(destConfig) _ = cfg2.ReadInConfig() - if src, err = FromViperSimple(cfg1); err != nil { + if src, err = FromViper(cfg1); err != nil { t.Fatal(err) } - if dest, err = FromViperSimple(cfg2); err != 
nil { + if dest, err = FromViper(cfg2); err != nil { t.Fatal(err) } diff --git a/pkg/config/migrate.go b/pkg/config/migrate.go new file mode 100644 index 00000000..28fb5d1f --- /dev/null +++ b/pkg/config/migrate.go @@ -0,0 +1,40 @@ +/* +Copyright © 2020 The k3d Author(s) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +*/ + +package config + +import ( + "fmt" + + types "github.com/rancher/k3d/v4/pkg/config/types" +) + +func Migrate(config types.Config, targetVersion string) (types.Config, error) { + + migration, ok := getMigrations(targetVersion)[config.GetAPIVersion()] + if !ok { + return nil, fmt.Errorf("no migration possible from '%s' to '%s'", config.GetAPIVersion(), targetVersion) + } + + return migration(config) + +} diff --git a/pkg/config/process.go b/pkg/config/process.go index 199a77cf..d67d55ae 100644 --- a/pkg/config/process.go +++ b/pkg/config/process.go @@ -23,7 +23,7 @@ THE SOFTWARE. 
package config import ( - conf "github.com/rancher/k3d/v4/pkg/config/v1alpha2" + conf "github.com/rancher/k3d/v4/pkg/config/v1alpha3" log "github.com/sirupsen/logrus" ) diff --git a/pkg/config/process_test.go b/pkg/config/process_test.go index 5326ca53..c4d890ed 100644 --- a/pkg/config/process_test.go +++ b/pkg/config/process_test.go @@ -26,6 +26,7 @@ import ( "context" "testing" + conf "github.com/rancher/k3d/v4/pkg/config/v1alpha3" "github.com/rancher/k3d/v4/pkg/runtimes" "github.com/spf13/viper" "gotest.tools/assert" @@ -38,14 +39,14 @@ func TestProcessClusterConfig(t *testing.T) { vip.SetConfigFile(cfgFile) _ = vip.ReadInConfig() - cfg, err := FromViperSimple(vip) + cfg, err := FromViper(vip) if err != nil { t.Error(err) } t.Logf("\n========== Read Config and transform to cluster ==========\n%+v\n=================================\n", cfg) - clusterCfg, err := TransformSimpleToClusterConfig(context.Background(), runtimes.Docker, cfg) + clusterCfg, err := TransformSimpleToClusterConfig(context.Background(), runtimes.Docker, cfg.(conf.SimpleConfig)) if err != nil { t.Error(err) } diff --git a/pkg/config/test_assets/config_test_cluster.yaml b/pkg/config/test_assets/config_test_cluster.yaml index e90f30b2..f1a8438c 100644 --- a/pkg/config/test_assets/config_test_cluster.yaml +++ b/pkg/config/test_assets/config_test_cluster.yaml @@ -1,4 +1,4 @@ -apiVersion: k3d.io/v1alpha2 +apiVersion: k3d.io/v1alpha3 kind: Cluster name: foo nodes: diff --git a/pkg/config/test_assets/config_test_cluster_list.yaml b/pkg/config/test_assets/config_test_cluster_list.yaml index 0eba2c22..9d2e55ca 100644 --- a/pkg/config/test_assets/config_test_cluster_list.yaml +++ b/pkg/config/test_assets/config_test_cluster_list.yaml @@ -1,5 +1,5 @@ --- -apiVersion: k3d.io/v1alpha2 +apiVersion: k3d.io/v1alpha3 kind: ClusterList clusters: - name: foo diff --git a/pkg/config/test_assets/config_test_simple.yaml b/pkg/config/test_assets/config_test_simple.yaml index e264bd9a..4e132176 100644 --- 
a/pkg/config/test_assets/config_test_simple.yaml +++ b/pkg/config/test_assets/config_test_simple.yaml @@ -1,4 +1,4 @@ -apiVersion: k3d.io/v1alpha2 +apiVersion: k3d.io/v1alpha3 kind: Simple name: test servers: 1 @@ -25,7 +25,7 @@ env: labels: - label: foo=bar nodeFilters: - - server[0] + - "server[0]" - loadbalancer options: @@ -35,9 +35,10 @@ options: disableLoadbalancer: false disableImageVolume: false k3s: - extraServerArgs: - - --tls-san=127.0.0.1 - extraAgentArgs: [] + extraArgs: + - arg: --tls-san=127.0.0.1 + nodeFilters: + - "server[*]" kubeconfig: updateDefaultKubeconfig: true - switchCurrentContext: true \ No newline at end of file + switchCurrentContext: true diff --git a/pkg/config/test_assets/config_test_simple_2.yaml b/pkg/config/test_assets/config_test_simple_2.yaml index 0d5293ee..a849e322 100644 --- a/pkg/config/test_assets/config_test_simple_2.yaml +++ b/pkg/config/test_assets/config_test_simple_2.yaml @@ -1,4 +1,4 @@ -apiVersion: k3d.io/v1alpha2 +apiVersion: k3d.io/v1alpha3 kind: Simple name: supertest agents: 8 \ No newline at end of file diff --git a/pkg/config/test_assets/config_test_simple_invalid_servers.yaml b/pkg/config/test_assets/config_test_simple_invalid_servers.yaml index 7f2442c3..b9e75fb6 100644 --- a/pkg/config/test_assets/config_test_simple_invalid_servers.yaml +++ b/pkg/config/test_assets/config_test_simple_invalid_servers.yaml @@ -1,4 +1,4 @@ -apiVersion: k3d.io/v1alpha2 +apiVersion: k3d.io/v1alpha3 kind: Simple name: 1234 servers: 1 @@ -35,9 +35,10 @@ options: disableLoadbalancer: false disableImageVolume: false k3s: - extraServerArgs: - - --tls-san=127.0.0.1 - extraAgentArgs: [] + extraArgs: + - arg: --tls-san=127.0.0.1 + nodeFilters: + - "server[*]" kubeconfig: updateDefaultKubeconfig: true switchCurrentContext: true \ No newline at end of file diff --git a/pkg/config/test_assets/config_test_unknown.yaml b/pkg/config/test_assets/config_test_unknown.yaml index 356972ce..66fe0c0f 100644 --- 
a/pkg/config/test_assets/config_test_unknown.yaml +++ b/pkg/config/test_assets/config_test_unknown.yaml @@ -1,3 +1,3 @@ -apiVersion: k3d.io/v1alpha2 +apiVersion: k3d.io/v1alpha3 kind: Unknown foo: bar \ No newline at end of file diff --git a/pkg/config/transform.go b/pkg/config/transform.go index 76f9751e..0ec43686 100644 --- a/pkg/config/transform.go +++ b/pkg/config/transform.go @@ -31,7 +31,7 @@ import ( "github.com/docker/go-connections/nat" cliutil "github.com/rancher/k3d/v4/cmd/util" // TODO: move parseapiport to pkg - conf "github.com/rancher/k3d/v4/pkg/config/v1alpha2" + conf "github.com/rancher/k3d/v4/pkg/config/v1alpha3" "github.com/rancher/k3d/v4/pkg/runtimes" k3d "github.com/rancher/k3d/v4/pkg/types" "github.com/rancher/k3d/v4/pkg/types/k3s" @@ -117,7 +117,6 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim serverNode := k3d.Node{ Role: k3d.ServerRole, Image: simpleConfig.Image, - Args: simpleConfig.Options.K3sOptions.ExtraServerArgs, ServerOpts: k3d.ServerOpts{}, Memory: simpleConfig.Options.Runtime.ServersMemory, } @@ -135,7 +134,6 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim agentNode := k3d.Node{ Role: k3d.AgentRole, Image: simpleConfig.Image, - Args: simpleConfig.Options.K3sOptions.ExtraAgentArgs, Memory: simpleConfig.Options.Runtime.AgentsMemory, } newCluster.Nodes = append(newCluster.Nodes, &agentNode) @@ -228,6 +226,22 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim } } + // -> ARGS + for _, argWithNodeFilters := range simpleConfig.Options.K3sOptions.ExtraArgs { + if len(argWithNodeFilters.NodeFilters) == 0 && nodeCount > 1 { + return nil, fmt.Errorf("K3sExtraArg '%s' lacks a node filter, but there's more than one node", argWithNodeFilters.Arg) + } + + nodes, err := util.FilterNodes(nodeList, argWithNodeFilters.NodeFilters) + if err != nil { + return nil, err + } + + for _, node := range nodes { + node.Args = append(node.Args, 
argWithNodeFilters.Arg) + } + } + /************************** * Cluster Create Options * **************************/ @@ -238,8 +252,6 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim WaitForServer: simpleConfig.Options.K3dOptions.Wait, Timeout: simpleConfig.Options.K3dOptions.Timeout, DisableLoadBalancer: simpleConfig.Options.K3dOptions.DisableLoadbalancer, - K3sServerArgs: simpleConfig.Options.K3sOptions.ExtraServerArgs, - K3sAgentArgs: simpleConfig.Options.K3sOptions.ExtraAgentArgs, GPURequest: simpleConfig.Options.Runtime.GPURequest, ServersMemory: simpleConfig.Options.Runtime.ServersMemory, AgentsMemory: simpleConfig.Options.Runtime.AgentsMemory, diff --git a/pkg/config/transform_test.go b/pkg/config/transform_test.go index 6cfb336d..495f499c 100644 --- a/pkg/config/transform_test.go +++ b/pkg/config/transform_test.go @@ -26,6 +26,7 @@ import ( "context" "testing" + conf "github.com/rancher/k3d/v4/pkg/config/v1alpha3" "github.com/rancher/k3d/v4/pkg/runtimes" "github.com/spf13/viper" ) @@ -37,14 +38,14 @@ func TestTransformSimpleConfigToClusterConfig(t *testing.T) { vip.SetConfigFile(cfgFile) _ = vip.ReadInConfig() - cfg, err := FromViperSimple(vip) + cfg, err := FromViper(vip) if err != nil { t.Error(err) } t.Logf("\n========== Read Config ==========\n%+v\n=================================\n", cfg) - clusterCfg, err := TransformSimpleToClusterConfig(context.Background(), runtimes.Docker, cfg) + clusterCfg, err := TransformSimpleToClusterConfig(context.Background(), runtimes.Docker, cfg.(conf.SimpleConfig)) if err != nil { t.Error(err) } diff --git a/pkg/config/types/types.go b/pkg/config/types/types.go new file mode 100644 index 00000000..ff0e26e5 --- /dev/null +++ b/pkg/config/types/types.go @@ -0,0 +1,34 @@ +/* +Copyright © 2020 The k3d Author(s) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software 
without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +*/ +package types + +// TypeMeta is basically copied from https://github.com/kubernetes/apimachinery/blob/a3b564b22db316a41e94fdcffcf9995424fe924c/pkg/apis/meta/v1/types.go#L36-L56 +type TypeMeta struct { + Kind string `mapstructure:"kind,omitempty" yaml:"kind,omitempty" json:"kind,omitempty"` + APIVersion string `mapstructure:"apiVersion,omitempty" yaml:"apiVersion,omitempty" json:"apiVersion,omitempty"` +} + +// Config interface. 
+type Config interface { + GetKind() string + GetAPIVersion() string +} diff --git a/pkg/config/v1alpha2/types.go b/pkg/config/v1alpha2/types.go index 0679cfd4..9f2375d1 100644 --- a/pkg/config/v1alpha2/types.go +++ b/pkg/config/v1alpha2/types.go @@ -27,6 +27,7 @@ import ( "fmt" "time" + configtypes "github.com/rancher/k3d/v4/pkg/config/types" k3d "github.com/rancher/k3d/v4/pkg/types" "github.com/rancher/k3d/v4/version" ) @@ -35,9 +36,11 @@ import ( //go:embed schema.json var JSONSchema string +const ApiVersion = "k3d.io/v1alpha2" + // DefaultConfigTpl for printing const DefaultConfigTpl = `--- -apiVersion: k3d.io/v1alpha2 +apiVersion: %s kind: Simple name: %s servers: 1 @@ -48,21 +51,11 @@ image: %s // DefaultConfig templated DefaultConfigTpl var DefaultConfig = fmt.Sprintf( DefaultConfigTpl, + ApiVersion, k3d.DefaultClusterName, fmt.Sprintf("%s:%s", k3d.DefaultK3sImageRepo, version.GetK3sVersion(false)), ) -// TypeMeta is basically copied from https://github.com/kubernetes/apimachinery/blob/a3b564b22db316a41e94fdcffcf9995424fe924c/pkg/apis/meta/v1/types.go#L36-L56 -type TypeMeta struct { - Kind string `mapstructure:"kind,omitempty" yaml:"kind,omitempty" json:"kind,omitempty"` - APIVersion string `mapstructure:"apiVersion,omitempty" yaml:"apiVersion,omitempty" json:"apiVersion,omitempty"` -} - -// Config interface. -type Config interface { - GetKind() string -} - type VolumeWithNodeFilters struct { Volume string `mapstructure:"volume" yaml:"volume" json:"volume,omitempty"` NodeFilters []string `mapstructure:"nodeFilters" yaml:"nodeFilters" json:"nodeFilters,omitempty"` @@ -119,21 +112,21 @@ type SimpleConfigOptionsK3s struct { // SimpleConfig describes the toplevel k3d configuration file. 
type SimpleConfig struct { - TypeMeta `mapstructure:",squash" yaml:",inline"` - Name string `mapstructure:"name" yaml:"name" json:"name,omitempty"` - Servers int `mapstructure:"servers" yaml:"servers" json:"servers,omitempty"` //nolint:lll // default 1 - Agents int `mapstructure:"agents" yaml:"agents" json:"agents,omitempty"` //nolint:lll // default 0 - ExposeAPI SimpleExposureOpts `mapstructure:"kubeAPI" yaml:"kubeAPI" json:"kubeAPI,omitempty"` - Image string `mapstructure:"image" yaml:"image" json:"image,omitempty"` - Network string `mapstructure:"network" yaml:"network" json:"network,omitempty"` - Subnet string `mapstructure:"subnet" yaml:"subnet" json:"subnet,omitempty"` - ClusterToken string `mapstructure:"token" yaml:"clusterToken" json:"clusterToken,omitempty"` // default: auto-generated - Volumes []VolumeWithNodeFilters `mapstructure:"volumes" yaml:"volumes" json:"volumes,omitempty"` - Ports []PortWithNodeFilters `mapstructure:"ports" yaml:"ports" json:"ports,omitempty"` - Labels []LabelWithNodeFilters `mapstructure:"labels" yaml:"labels" json:"labels,omitempty"` - Options SimpleConfigOptions `mapstructure:"options" yaml:"options" json:"options,omitempty"` - Env []EnvVarWithNodeFilters `mapstructure:"env" yaml:"env" json:"env,omitempty"` - Registries struct { + configtypes.TypeMeta `mapstructure:",squash" yaml:",inline"` + Name string `mapstructure:"name" yaml:"name" json:"name,omitempty"` + Servers int `mapstructure:"servers" yaml:"servers" json:"servers,omitempty"` //nolint:lll // default 1 + Agents int `mapstructure:"agents" yaml:"agents" json:"agents,omitempty"` //nolint:lll // default 0 + ExposeAPI SimpleExposureOpts `mapstructure:"kubeAPI" yaml:"kubeAPI" json:"kubeAPI,omitempty"` + Image string `mapstructure:"image" yaml:"image" json:"image,omitempty"` + Network string `mapstructure:"network" yaml:"network" json:"network,omitempty"` + Subnet string `mapstructure:"subnet" yaml:"subnet" json:"subnet,omitempty"` + ClusterToken string 
`mapstructure:"token" yaml:"clusterToken" json:"clusterToken,omitempty"` // default: auto-generated + Volumes []VolumeWithNodeFilters `mapstructure:"volumes" yaml:"volumes" json:"volumes,omitempty"` + Ports []PortWithNodeFilters `mapstructure:"ports" yaml:"ports" json:"ports,omitempty"` + Labels []LabelWithNodeFilters `mapstructure:"labels" yaml:"labels" json:"labels,omitempty"` + Options SimpleConfigOptions `mapstructure:"options" yaml:"options" json:"options,omitempty"` + Env []EnvVarWithNodeFilters `mapstructure:"env" yaml:"env" json:"env,omitempty"` + Registries struct { Use []string `mapstructure:"use" yaml:"use,omitempty" json:"use,omitempty"` Create bool `mapstructure:"create" yaml:"create,omitempty" json:"create,omitempty"` Config string `mapstructure:"config" yaml:"config,omitempty" json:"config,omitempty"` // registries.yaml (k3s config for containerd registry override) @@ -147,30 +140,60 @@ type SimpleExposureOpts struct { HostPort string `mapstructure:"hostPort" yaml:"hostPort,omitempty" json:"hostPort,omitempty"` } -// GetKind implements Config.GetKind +// Kind implements Config.Kind func (c SimpleConfig) GetKind() string { - return "Cluster" + return "Simple" +} + +func (c SimpleConfig) GetAPIVersion() string { + return ApiVersion } // ClusterConfig describes a single cluster config type ClusterConfig struct { - TypeMeta `mapstructure:",squash" yaml:",inline"` - Cluster k3d.Cluster `mapstructure:",squash" yaml:",inline"` - ClusterCreateOpts k3d.ClusterCreateOpts `mapstructure:"options" yaml:"options"` - KubeconfigOpts SimpleConfigOptionsKubeconfig `mapstructure:"kubeconfig" yaml:"kubeconfig"` + configtypes.TypeMeta `mapstructure:",squash" yaml:",inline"` + Cluster k3d.Cluster `mapstructure:",squash" yaml:",inline"` + ClusterCreateOpts k3d.ClusterCreateOpts `mapstructure:"options" yaml:"options"` + KubeconfigOpts SimpleConfigOptionsKubeconfig `mapstructure:"kubeconfig" yaml:"kubeconfig"` } -// GetKind implements Config.GetKind +// Kind implements 
Config.Kind func (c ClusterConfig) GetKind() string { - return "Cluster" + return "Simple" +} + +func (c ClusterConfig) GetAPIVersion() string { + return ApiVersion } // ClusterListConfig describes a list of clusters type ClusterListConfig struct { - TypeMeta `mapstructure:",squash" yaml:",inline"` - Clusters []k3d.Cluster `mapstructure:"clusters" yaml:"clusters"` + configtypes.TypeMeta `mapstructure:",squash" yaml:",inline"` + Clusters []k3d.Cluster `mapstructure:"clusters" yaml:"clusters"` } func (c ClusterListConfig) GetKind() string { - return "ClusterList" + return "Simple" +} + +func (c ClusterListConfig) GetAPIVersion() string { + return ApiVersion +} + +func GetConfigByKind(kind string) (configtypes.Config, error) { + + // determine config kind + switch kind { + case "simple": + return SimpleConfig{}, nil + case "cluster": + return ClusterConfig{}, nil + case "clusterlist": + return ClusterListConfig{}, nil + case "": + return nil, fmt.Errorf("missing `kind` in config file") + default: + return nil, fmt.Errorf("unknown `kind` '%s' in config file", kind) + } + } diff --git a/pkg/config/v1alpha3/migrations.go b/pkg/config/v1alpha3/migrations.go new file mode 100644 index 00000000..5c894ca1 --- /dev/null +++ b/pkg/config/v1alpha3/migrations.go @@ -0,0 +1,84 @@ +/* +Copyright © 2020 The k3d Author(s) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +*/ + +package v1alpha3 + +import ( + "encoding/json" + + configtypes "github.com/rancher/k3d/v4/pkg/config/types" + "github.com/rancher/k3d/v4/pkg/config/v1alpha2" + log "github.com/sirupsen/logrus" +) + +var Migrations = map[string]func(configtypes.Config) (configtypes.Config, error){ + v1alpha2.ApiVersion: MigrateV1Alpha2, +} + +func MigrateV1Alpha2(input configtypes.Config) (configtypes.Config, error) { + log.Debugln("Migrating v1alpha2 to v1alpha3") + + injson, err := json.Marshal(input) + if err != nil { + return nil, err + } + + if input.GetKind() == "Simple" { + cfg := SimpleConfig{} + + if err := json.Unmarshal(injson, &cfg); err != nil { + return nil, err + } + + cfg.Options.K3sOptions.ExtraArgs = []K3sArgWithNodeFilters{} + + for _, arg := range input.(v1alpha2.SimpleConfig).Options.K3sOptions.ExtraServerArgs { + cfg.Options.K3sOptions.ExtraArgs = append(cfg.Options.K3sOptions.ExtraArgs, K3sArgWithNodeFilters{ + Arg: arg, + NodeFilters: []string{ + "server[*]", + }, + }) + } + + for _, arg := range input.(v1alpha2.SimpleConfig).Options.K3sOptions.ExtraAgentArgs { + cfg.Options.K3sOptions.ExtraArgs = append(cfg.Options.K3sOptions.ExtraArgs, K3sArgWithNodeFilters{ + Arg: arg, + NodeFilters: []string{ + "agent[*]", + }, + }) + } + + cfg.APIVersion = ApiVersion + + log.Debugf("Migrated config: %+v", cfg) + + return cfg, nil + + } + + log.Debugf("No migration needed for %s#%s -> %s#%s", input.GetAPIVersion(), input.GetKind(), ApiVersion, input.GetKind()) + + return input, nil + +} diff --git 
a/pkg/config/v1alpha3/schema.json b/pkg/config/v1alpha3/schema.json new file mode 100644 index 00000000..2b07c02c --- /dev/null +++ b/pkg/config/v1alpha3/schema.json @@ -0,0 +1,254 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "SimpleConfig", + "type": "object", + "required": [ + "apiVersion", + "kind" + ], + "properties": { + "apiVersion": { + "type": "string", + "enum": [ + "k3d.io/v1alpha3" + ], + "default": "k3d.io/v1alpha3" + }, + "kind": { + "type": "string", + "enum": [ + "Simple" + ], + "default": "Simple" + }, + "name": { + "description": "Name of the cluster (must be a valid hostname and will be prefixed with 'k3d-'). Example: 'mycluster'.", + "type": "string", + "format": "hostname" + }, + "servers": { + "type": "number", + "minimum": 1 + }, + "agents": { + "type": "number", + "minimum": 0 + }, + "kubeAPI": { + "type": "object", + "properties": { + "host": { + "type": "string", + "format": "hostname" + }, + "hostIP": { + "type": "string", + "format": "ipv4", + "examples": [ + "0.0.0.0", + "192.168.178.55" + ] + }, + "hostPort": { + "type":"string", + "examples": [ + "6443" + ] + } + }, + "additionalProperties": false + }, + "image": { + "type": "string", + "examples": [ + "rancher/k3s:latest" + ] + }, + "network": { + "type": "string" + }, + "subnet": { + "type": "string", + "default": "auto", + "examples": [ + "172.28.0.0/16", + "192.162.0.0/16" + ] + }, + "token": { + "type": "string" + }, + "volumes": { + "type": "array", + "items": { + "type": "object", + "properties": { + "volume": { + "type": "string" + }, + "nodeFilters": { + "$ref": "#/definitions/nodeFilters" + } + }, + "additionalProperties": false + } + }, + "ports": { + "type": "array", + "items": { + "type": "object", + "properties": { + "port": { + "type": "string" + }, + "nodeFilters": { + "$ref": "#/definitions/nodeFilters" + } + }, + "additionalProperties": false + } + }, + "labels": { + "type": "array", + "items": { + "type": "object", + "properties": { + 
"label": { + "type": "string" + }, + "nodeFilters": { + "$ref": "#/definitions/nodeFilters" + } + }, + "additionalProperties": false + } + }, + "options": { + "type": "object", + "properties": { + "k3d": { + "type": "object", + "properties": { + "wait": { + "type": "boolean", + "default": true + }, + "timeout": { + "type": "string", + "examples": [ + "60s", + "1m", + "1m30s" + ] + }, + "disableLoadbalancer": { + "type": "boolean", + "default": false + }, + "disableImageVolume": { + "type": "boolean", + "default": false + }, + "disableRollback": { + "type": "boolean", + "default": false + }, + "disableHostIPInjection": { + "type": "boolean", + "default": false + } + }, + "additionalProperties": false + }, + "k3s": { + "type": "object", + "properties": { + "extraArgs": { + "type": "array", + "items": { + "type": "object", + "properties": { + "arg": { + "type": "string", + "examples": [ + "--tls-san=127.0.0.1", + "--disable=traefik" + ] + }, + "nodeFilters": { + "$ref": "#/definitions/nodeFilters" + } + }, + "additionalProperties": false + } + } + }, + "additionalProperties": false + }, + "kubeconfig": { + "type": "object", + "properties": { + "updateDefaultKubeconfig": { + "type": "boolean", + "default": true + }, + "switchCurrentContext": { + "type": "boolean", + "default": true + } + }, + "additionalProperties": false + }, + "runtime": { + "type": "object", + "properties": { + "gpuRequest": { + "type": "string" + }, + "serversMemory": { + "type": "string" + }, + "agentsMemory": { + "type": "string" + } + } + } + }, + "additionalProperties": false + }, + "env": { + "type": "array", + "items": { + "type": "object", + "properties": { + "envVar": { + "type": "string" + }, + "nodeFilters": { + "$ref": "#/definitions/nodeFilters" + } + }, + "additionalProperties": false + } + }, + "registries": { + "type": "object" + } + }, + "additionalProperties": false, + "definitions": { + "nodeFilters": { + "type": "array", + "items": { + "type": "string" + }, + "examples": [ + 
"loadbalancer", + "server[*]", + "server[0]", + "agent[1]", + "all" + ] + } + } +} \ No newline at end of file diff --git a/pkg/config/v1alpha3/types.go b/pkg/config/v1alpha3/types.go new file mode 100644 index 00000000..22465c71 --- /dev/null +++ b/pkg/config/v1alpha3/types.go @@ -0,0 +1,203 @@ +/* +Copyright © 2020 The k3d Author(s) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+*/ + +package v1alpha3 + +import ( + _ "embed" + "fmt" + "strings" + "time" + + config "github.com/rancher/k3d/v4/pkg/config/types" + k3d "github.com/rancher/k3d/v4/pkg/types" + "github.com/rancher/k3d/v4/version" +) + +const ApiVersion = "k3d.io/v1alpha3" + +// JSONSchema describes the schema used to validate config files +//go:embed schema.json +var JSONSchema string + +// DefaultConfigTpl for printing +const DefaultConfigTpl = `--- +apiVersion: k3d.io/v1alpha3 +kind: Simple +name: %s +servers: 1 +agents: 0 +image: %s +` + +// DefaultConfig templated DefaultConfigTpl +var DefaultConfig = fmt.Sprintf( + DefaultConfigTpl, + k3d.DefaultClusterName, + fmt.Sprintf("%s:%s", k3d.DefaultK3sImageRepo, version.GetK3sVersion(false)), +) + +type VolumeWithNodeFilters struct { + Volume string `mapstructure:"volume" yaml:"volume" json:"volume,omitempty"` + NodeFilters []string `mapstructure:"nodeFilters" yaml:"nodeFilters" json:"nodeFilters,omitempty"` +} + +type PortWithNodeFilters struct { + Port string `mapstructure:"port" yaml:"port" json:"port,omitempty"` + NodeFilters []string `mapstructure:"nodeFilters" yaml:"nodeFilters" json:"nodeFilters,omitempty"` +} + +type LabelWithNodeFilters struct { + Label string `mapstructure:"label" yaml:"label" json:"label,omitempty"` + NodeFilters []string `mapstructure:"nodeFilters" yaml:"nodeFilters" json:"nodeFilters,omitempty"` +} + +type EnvVarWithNodeFilters struct { + EnvVar string `mapstructure:"envVar" yaml:"envVar" json:"envVar,omitempty"` + NodeFilters []string `mapstructure:"nodeFilters" yaml:"nodeFilters" json:"nodeFilters,omitempty"` +} + +type K3sArgWithNodeFilters struct { + Arg string `mapstructure:"arg" yaml:"arg" json:"arg,omitempty"` + NodeFilters []string `mapstructure:"nodeFilters" yaml:"nodeFilters" json:"nodeFilters,omitempty"` +} + +// SimpleConfigOptionsKubeconfig describes the set of options referring to the kubeconfig during cluster creation. 
+type SimpleConfigOptionsKubeconfig struct { + UpdateDefaultKubeconfig bool `mapstructure:"updateDefaultKubeconfig" yaml:"updateDefaultKubeconfig" json:"updateDefaultKubeconfig,omitempty"` // default: true + SwitchCurrentContext bool `mapstructure:"switchCurrentContext" yaml:"switchCurrentContext" json:"switchCurrentContext,omitempty"` //nolint:lll // default: true +} + +type SimpleConfigOptions struct { + K3dOptions SimpleConfigOptionsK3d `mapstructure:"k3d" yaml:"k3d"` + K3sOptions SimpleConfigOptionsK3s `mapstructure:"k3s" yaml:"k3s"` + KubeconfigOptions SimpleConfigOptionsKubeconfig `mapstructure:"kubeconfig" yaml:"kubeconfig"` + Runtime SimpleConfigOptionsRuntime `mapstructure:"runtime" yaml:"runtime"` +} + +type SimpleConfigOptionsRuntime struct { + GPURequest string `mapstructure:"gpuRequest" yaml:"gpuRequest"` + ServersMemory string `mapstructure:"serversMemory" yaml:"serversMemory"` + AgentsMemory string `mapstructure:"agentsMemory" yaml:"agentsMemory"` +} + +type SimpleConfigOptionsK3d struct { + Wait bool `mapstructure:"wait" yaml:"wait"` + Timeout time.Duration `mapstructure:"timeout" yaml:"timeout"` + DisableLoadbalancer bool `mapstructure:"disableLoadbalancer" yaml:"disableLoadbalancer"` + DisableImageVolume bool `mapstructure:"disableImageVolume" yaml:"disableImageVolume"` + NoRollback bool `mapstructure:"disableRollback" yaml:"disableRollback"` + PrepDisableHostIPInjection bool `mapstructure:"disableHostIPInjection" yaml:"disableHostIPInjection"` + NodeHookActions []k3d.NodeHookAction `mapstructure:"nodeHookActions" yaml:"nodeHookActions,omitempty"` +} + +type SimpleConfigOptionsK3s struct { + ExtraArgs []K3sArgWithNodeFilters `mapstructure:"extraArgs" yaml:"extraArgs"` +} + +// SimpleConfig describes the toplevel k3d configuration file. 
+type SimpleConfig struct { + config.TypeMeta `mapstructure:",squash" yaml:",inline"` + Name string `mapstructure:"name" yaml:"name" json:"name,omitempty"` + Servers int `mapstructure:"servers" yaml:"servers" json:"servers,omitempty"` //nolint:lll // default 1 + Agents int `mapstructure:"agents" yaml:"agents" json:"agents,omitempty"` //nolint:lll // default 0 + ExposeAPI SimpleExposureOpts `mapstructure:"kubeAPI" yaml:"kubeAPI" json:"kubeAPI,omitempty"` + Image string `mapstructure:"image" yaml:"image" json:"image,omitempty"` + Network string `mapstructure:"network" yaml:"network" json:"network,omitempty"` + Subnet string `mapstructure:"subnet" yaml:"subnet" json:"subnet,omitempty"` + ClusterToken string `mapstructure:"token" yaml:"clusterToken" json:"clusterToken,omitempty"` // default: auto-generated + Volumes []VolumeWithNodeFilters `mapstructure:"volumes" yaml:"volumes" json:"volumes,omitempty"` + Ports []PortWithNodeFilters `mapstructure:"ports" yaml:"ports" json:"ports,omitempty"` + Labels []LabelWithNodeFilters `mapstructure:"labels" yaml:"labels" json:"labels,omitempty"` + Options SimpleConfigOptions `mapstructure:"options" yaml:"options" json:"options,omitempty"` + Env []EnvVarWithNodeFilters `mapstructure:"env" yaml:"env" json:"env,omitempty"` + Registries struct { + Use []string `mapstructure:"use" yaml:"use,omitempty" json:"use,omitempty"` + Create bool `mapstructure:"create" yaml:"create,omitempty" json:"create,omitempty"` + Config string `mapstructure:"config" yaml:"config,omitempty" json:"config,omitempty"` // registries.yaml (k3s config for containerd registry override) + } `mapstructure:"registries" yaml:"registries,omitempty" json:"registries,omitempty"` +} + +// SimpleExposureOpts provides a simplified syntax compared to the original k3d.ExposureOpts +type SimpleExposureOpts struct { + Host string `mapstructure:"host" yaml:"host,omitempty" json:"host,omitempty"` + HostIP string `mapstructure:"hostIP" yaml:"hostIP,omitempty" 
json:"hostIP,omitempty"` + HostPort string `mapstructure:"hostPort" yaml:"hostPort,omitempty" json:"hostPort,omitempty"` +} + +// GetKind implements Config.GetKind +func (c SimpleConfig) GetKind() string { + return "Simple" +} + +func (c SimpleConfig) GetAPIVersion() string { + return ApiVersion +} + +// ClusterConfig describes a single cluster config +type ClusterConfig struct { + config.TypeMeta `mapstructure:",squash" yaml:",inline"` + Cluster k3d.Cluster `mapstructure:",squash" yaml:",inline"` + ClusterCreateOpts k3d.ClusterCreateOpts `mapstructure:"options" yaml:"options"` + KubeconfigOpts SimpleConfigOptionsKubeconfig `mapstructure:"kubeconfig" yaml:"kubeconfig"` +} + +// GetKind implements Config.GetKind +func (c ClusterConfig) GetKind() string { + return "Simple" +} + +func (c ClusterConfig) GetAPIVersion() string { + return ApiVersion +} + +// ClusterListConfig describes a list of clusters +type ClusterListConfig struct { + config.TypeMeta `mapstructure:",squash" yaml:",inline"` + Clusters []k3d.Cluster `mapstructure:"clusters" yaml:"clusters"` +} + +func (c ClusterListConfig) GetKind() string { + return "Simple" +} + +func (c ClusterListConfig) GetAPIVersion() string { + return ApiVersion +} + +func GetConfigByKind(kind string) (config.Config, error) { + + // determine config kind + switch strings.ToLower(kind) { + case "simple": + return SimpleConfig{}, nil + case "cluster": + return ClusterConfig{}, nil + case "clusterlist": + return ClusterListConfig{}, nil + case "": + return nil, fmt.Errorf("missing `kind` in config file") + default: + return nil, fmt.Errorf("unknown `kind` '%s' in config file", kind) + } + +} diff --git a/pkg/config/validate.go b/pkg/config/validate.go index bb81ba41..c647e4f5 100644 --- a/pkg/config/validate.go +++ b/pkg/config/validate.go @@ -27,7 +27,7 @@ import ( "time" k3dc "github.com/rancher/k3d/v4/pkg/client" - conf "github.com/rancher/k3d/v4/pkg/config/v1alpha2" + conf "github.com/rancher/k3d/v4/pkg/config/v1alpha3" 
"github.com/rancher/k3d/v4/pkg/runtimes" runtimeutil "github.com/rancher/k3d/v4/pkg/runtimes/util" k3d "github.com/rancher/k3d/v4/pkg/types" diff --git a/pkg/config/validate_test.go b/pkg/config/validate_test.go index 45e736e3..88177808 100644 --- a/pkg/config/validate_test.go +++ b/pkg/config/validate_test.go @@ -26,7 +26,7 @@ import ( "context" "testing" - conf "github.com/rancher/k3d/v4/pkg/config/v1alpha2" + conf "github.com/rancher/k3d/v4/pkg/config/v1alpha3" "github.com/rancher/k3d/v4/pkg/runtimes" "github.com/spf13/viper" ) diff --git a/pkg/types/types.go b/pkg/types/types.go index 655047ad..e31141ec 100644 --- a/pkg/types/types.go +++ b/pkg/types/types.go @@ -184,8 +184,6 @@ type ClusterCreateOpts struct { WaitForServer bool `yaml:"waitForServer" json:"waitForServer,omitempty"` Timeout time.Duration `yaml:"timeout" json:"timeout,omitempty"` DisableLoadBalancer bool `yaml:"disableLoadbalancer" json:"disableLoadbalancer,omitempty"` - K3sServerArgs []string `yaml:"k3sServerArgs" json:"k3sServerArgs,omitempty"` - K3sAgentArgs []string `yaml:"k3sAgentArgs" json:"k3sAgentArgs,omitempty"` GPURequest string `yaml:"gpuRequest" json:"gpuRequest,omitempty"` ServersMemory string `yaml:"serversMemory" json:"serversMemory,omitempty"` AgentsMemory string `yaml:"agentsMemory" json:"agentsMemory,omitempty"` diff --git a/tests/assets/config_test_simple.yaml b/tests/assets/config_test_simple.yaml index 4d9f7255..c1d05e2b 100755 --- a/tests/assets/config_test_simple.yaml +++ b/tests/assets/config_test_simple.yaml @@ -1,4 +1,4 @@ -apiVersion: k3d.io/v1alpha2 +apiVersion: k3d.io/v1alpha3 kind: Simple name: test servers: 3 @@ -43,9 +43,10 @@ options: disableLoadbalancer: false disableImageVolume: false k3s: - extraServerArgs: - - --tls-san=127.0.0.1 - extraAgentArgs: [] + extraArgs: + - arg: --tls-san=127.0.0.1 + nodeFilters: + - server[*] kubeconfig: updateDefaultKubeconfig: true switchCurrentContext: true \ No newline at end of file diff --git 
a/tests/assets/config_test_simple_migration_v1alpha2.yaml b/tests/assets/config_test_simple_migration_v1alpha2.yaml new file mode 100755 index 00000000..4d9f7255 --- /dev/null +++ b/tests/assets/config_test_simple_migration_v1alpha2.yaml @@ -0,0 +1,51 @@ +apiVersion: k3d.io/v1alpha2 +kind: Simple +name: test +servers: 3 +agents: 2 +kubeAPI: + hostIP: "0.0.0.0" + hostPort: "6446" +image: rancher/k3s:latest +volumes: + - volume: /my/path:/some/path + nodeFilters: + - all +ports: + - port: 80:80 + nodeFilters: + - loadbalancer + - port: 0.0.0.0:443:443 + nodeFilters: + - loadbalancer +env: + - envVar: bar=baz,bob + nodeFilters: + - all +labels: + - label: foo=bar + nodeFilters: + - server[0] + - loadbalancer +registries: + create: true + use: [] + config: | + mirrors: + "my.company.registry": + endpoint: + - http://my.company.registry:5000 + +options: + k3d: + wait: true + timeout: "360s" # should be pretty high for multi-server clusters to allow for a proper startup routine + disableLoadbalancer: false + disableImageVolume: false + k3s: + extraServerArgs: + - --tls-san=127.0.0.1 + extraAgentArgs: [] + kubeconfig: + updateDefaultKubeconfig: true + switchCurrentContext: true \ No newline at end of file diff --git a/tests/assets/config_test_simple_migration_v1alpha3.yaml b/tests/assets/config_test_simple_migration_v1alpha3.yaml new file mode 100755 index 00000000..c1d05e2b --- /dev/null +++ b/tests/assets/config_test_simple_migration_v1alpha3.yaml @@ -0,0 +1,52 @@ +apiVersion: k3d.io/v1alpha3 +kind: Simple +name: test +servers: 3 +agents: 2 +kubeAPI: + hostIP: "0.0.0.0" + hostPort: "6446" +image: rancher/k3s:latest +volumes: + - volume: /my/path:/some/path + nodeFilters: + - all +ports: + - port: 80:80 + nodeFilters: + - loadbalancer + - port: 0.0.0.0:443:443 + nodeFilters: + - loadbalancer +env: + - envVar: bar=baz,bob + nodeFilters: + - all +labels: + - label: foo=bar + nodeFilters: + - server[0] + - loadbalancer +registries: + create: true + use: [] + config: | + 
mirrors: + "my.company.registry": + endpoint: + - http://my.company.registry:5000 + +options: + k3d: + wait: true + timeout: "360s" # should be pretty high for multi-server clusters to allow for a proper startup routine + disableLoadbalancer: false + disableImageVolume: false + k3s: + extraArgs: + - arg: --tls-san=127.0.0.1 + nodeFilters: + - server[*] + kubeconfig: + updateDefaultKubeconfig: true + switchCurrentContext: true \ No newline at end of file diff --git a/tests/test_config_file_migration.sh b/tests/test_config_file_migration.sh new file mode 100755 index 00000000..8f4be093 --- /dev/null +++ b/tests/test_config_file_migration.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +CURR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +[ -d "$CURR_DIR" ] || { echo "FATAL: no current dir (maybe running in zsh?)"; exit 1; } + +# shellcheck source=./common.sh
source "$CURR_DIR/common.sh" + + +export CURRENT_STAGE="Test | config-file-migration" + + + +highlight "[START] ConfigMigrateTest" + +tempdir=$(mktemp -d) +$EXE config migrate "$CURR_DIR/assets/config_test_simple_migration_v1alpha2.yaml" "$tempdir/expected.yaml" || failed "failed on $CURR_DIR/assets/config_test_simple_migration_v1alpha2.yaml" +$EXE config migrate "$CURR_DIR/assets/config_test_simple_migration_v1alpha3.yaml" "$tempdir/actual.yaml" || failed "failed on $CURR_DIR/assets/config_test_simple_migration_v1alpha3.yaml" + +diff "$tempdir/actual.yaml" "$tempdir/expected.yaml" || failed "config migration failed" && passed "config migration succeeded" + + +highlight "[DONE] ConfigMigrateTest" + +exit 0 + + From 1deb0aa64d27e03d73a91538c9b410ae99ca4290 Mon Sep 17 00:00:00 2001 From: ejose19 <8742215+ejose19@users.noreply.github.com> Date: Wed, 19 May 2021 09:20:47 -0300 Subject: [PATCH 03/46] [Enhancement] Refactoring: normalize label flags (k3s node & runtime) (#598, @ejose19) --- cmd/cluster/clusterCreate.go | 57 +++++++++++++++---- cmd/node/nodeCreate.go | 37 +++++++++--- cmd/node/nodeList.go | 2 +-
cmd/registry/registryList.go | 4 +- cmd/util/runtimeLabels.go | 35 ++++++++++++ pkg/client/cluster.go | 40 ++++++------- pkg/client/kubeconfig.go | 8 +-- pkg/client/node.go | 40 ++++++------- pkg/client/registry.go | 10 ++-- pkg/config/config_test.go | 20 +++++-- .../test_assets/config_test_simple.yaml | 16 ++++-- .../config_test_simple_invalid_servers.yaml | 18 ++++-- pkg/config/transform.go | 44 ++++++++++---- pkg/config/v1alpha3/migrations.go | 9 +++ pkg/config/v1alpha3/schema.json | 52 ++++++++++------- pkg/config/v1alpha3/types.go | 11 ++-- pkg/runtimes/docker/container.go | 2 +- pkg/runtimes/docker/network.go | 2 +- pkg/runtimes/docker/node.go | 2 +- pkg/runtimes/docker/translate.go | 44 +++++++------- pkg/runtimes/docker/translate_test.go | 6 +- pkg/runtimes/docker/util.go | 2 +- pkg/runtimes/docker/volume.go | 4 +- pkg/tools/tools.go | 24 ++++---- pkg/types/types.go | 10 ++-- tests/assets/config_test_simple.yaml | 18 ++++-- ...config_test_simple_migration_v1alpha3.yaml | 13 +++-- tests/common.sh | 6 ++ tests/test_config_file.sh | 6 +- tests/test_config_with_overrides.sh | 4 ++ 30 files changed, 361 insertions(+), 185 deletions(-) create mode 100644 cmd/util/runtimeLabels.go diff --git a/cmd/cluster/clusterCreate.go b/cmd/cluster/clusterCreate.go index 574db83d..89a2fbae 100644 --- a/cmd/cluster/clusterCreate.go +++ b/cmd/cluster/clusterCreate.go @@ -283,8 +283,11 @@ func NewCmdClusterCreate() *cobra.Command { cmd.Flags().StringArrayP("port", "p", nil, "Map ports from the node containers to the host (Format: `[HOST:][HOSTPORT:]CONTAINERPORT[/PROTOCOL][@NODEFILTER]`)\n - Example: `k3d cluster create --agents 2 -p 8080:80@agent[0] -p 8081@agent[1]`") _ = ppViper.BindPFlag("cli.ports", cmd.Flags().Lookup("port")) - cmd.Flags().StringArrayP("label", "l", nil, "Add label to node container (Format: `KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 -l \"my.label@agent[0,1]\" -l \"other.label=somevalue@server[0]\"`") - _ = 
ppViper.BindPFlag("cli.labels", cmd.Flags().Lookup("label")) + cmd.Flags().StringArrayP("k3s-node-label", "", nil, "Add label to k3s node (Format: `KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 --k3s-node-label \"my.label@agent[0,1]\" --k3s-node-label \"other.label=somevalue@server[0]\"`") + _ = ppViper.BindPFlag("cli.k3s-node-labels", cmd.Flags().Lookup("k3s-node-label")) + + cmd.Flags().StringArrayP("runtime-label", "", nil, "Add label to container runtime (Format: `KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 --runtime-label \"my.label@agent[0,1]\" --runtime-label \"other.label=somevalue@server[0]\"`") + _ = ppViper.BindPFlag("cli.runtime-labels", cmd.Flags().Lookup("runtime-label")) /* k3s */ cmd.Flags().StringArray("k3s-arg", nil, "Additional args passed to k3s command (Format: `ARG@NODEFILTER[;@NODEFILTER]`)\n - Example: `k3d cluster create --k3s-arg \"--disable=traefik@server[0]\"") @@ -481,10 +484,38 @@ func applyCLIOverrides(cfg conf.SimpleConfig) (conf.SimpleConfig, error) { log.Tracef("PortFilterMap: %+v", portFilterMap) - // --label - // labelFilterMap will add container label to applied node filters - labelFilterMap := make(map[string][]string, 1) - for _, labelFlag := range ppViper.GetStringSlice("cli.labels") { + // --k3s-node-label + // k3sNodeLabelFilterMap will add k3s node label to applied node filters + k3sNodeLabelFilterMap := make(map[string][]string, 1) + for _, labelFlag := range ppViper.GetStringSlice("cli.k3s-node-labels") { + + // split node filter from the specified label + label, nodeFilters, err := cliutil.SplitFiltersFromFlag(labelFlag) + if err != nil { + log.Fatalln(err) + } + + // create new entry or append filter to existing entry + if _, exists := k3sNodeLabelFilterMap[label]; exists { + k3sNodeLabelFilterMap[label] = append(k3sNodeLabelFilterMap[label], nodeFilters...) 
+ } else { + k3sNodeLabelFilterMap[label] = nodeFilters + } + } + + for label, nodeFilters := range k3sNodeLabelFilterMap { + cfg.Options.K3sOptions.NodeLabels = append(cfg.Options.K3sOptions.NodeLabels, conf.LabelWithNodeFilters{ + Label: label, + NodeFilters: nodeFilters, + }) + } + + log.Tracef("K3sNodeLabelFilterMap: %+v", k3sNodeLabelFilterMap) + + // --runtime-label + // runtimeLabelFilterMap will add container runtime label to applied node filters + runtimeLabelFilterMap := make(map[string][]string, 1) + for _, labelFlag := range ppViper.GetStringSlice("cli.runtime-labels") { // split node filter from the specified label label, nodeFilters, err := cliutil.SplitFiltersFromFlag(labelFlag) @@ -492,22 +523,24 @@ func applyCLIOverrides(cfg conf.SimpleConfig) (conf.SimpleConfig, error) { log.Fatalln(err) } + cliutil.ValidateRuntimeLabelKey(strings.Split(label, "=")[0]) + // create new entry or append filter to existing entry - if _, exists := labelFilterMap[label]; exists { - labelFilterMap[label] = append(labelFilterMap[label], nodeFilters...) + if _, exists := runtimeLabelFilterMap[label]; exists { + runtimeLabelFilterMap[label] = append(runtimeLabelFilterMap[label], nodeFilters...) 
} else { - labelFilterMap[label] = nodeFilters + runtimeLabelFilterMap[label] = nodeFilters } } - for label, nodeFilters := range labelFilterMap { - cfg.Labels = append(cfg.Labels, conf.LabelWithNodeFilters{ + for label, nodeFilters := range runtimeLabelFilterMap { + cfg.Options.Runtime.Labels = append(cfg.Options.Runtime.Labels, conf.LabelWithNodeFilters{ Label: label, NodeFilters: nodeFilters, }) } - log.Tracef("LabelFilterMap: %+v", labelFilterMap) + log.Tracef("RuntimeLabelFilterMap: %+v", runtimeLabelFilterMap) // --env // envFilterMap will add container env vars to applied node filters diff --git a/cmd/node/nodeCreate.go b/cmd/node/nodeCreate.go index cc7c2b10..8169170c 100644 --- a/cmd/node/nodeCreate.go +++ b/cmd/node/nodeCreate.go @@ -30,6 +30,7 @@ import ( dockerunits "github.com/docker/go-units" "github.com/rancher/k3d/v4/cmd/util" + cliutil "github.com/rancher/k3d/v4/cmd/util" k3dc "github.com/rancher/k3d/v4/pkg/client" "github.com/rancher/k3d/v4/pkg/runtimes" k3d "github.com/rancher/k3d/v4/pkg/types" @@ -74,6 +75,7 @@ func NewCmdNodeCreate() *cobra.Command { cmd.Flags().BoolVar(&createNodeOpts.Wait, "wait", false, "Wait for the node(s) to be ready before returning.") cmd.Flags().DurationVar(&createNodeOpts.Timeout, "timeout", 0*time.Second, "Maximum waiting time for '--wait' before canceling/returning.") + cmd.Flags().StringSliceP("runtime-label", "", []string{}, "Specify container runtime labels in format \"foo=bar\"") cmd.Flags().StringSliceP("k3s-node-label", "", []string{}, "Specify k3s node labels in format \"foo=bar\"") // done @@ -127,9 +129,30 @@ func parseCreateNodeCmd(cmd *cobra.Command, args []string) ([]*k3d.Node, *k3d.Cl log.Errorf("Provided memory limit value is invalid") } + // --runtime-label + runtimeLabelsFlag, err := cmd.Flags().GetStringSlice("runtime-label") + if err != nil { + log.Errorln("No runtime-label specified") + log.Fatalln(err) + } + + runtimeLabels := make(map[string]string, len(runtimeLabelsFlag)+1) + for _, label := 
range runtimeLabelsFlag { + labelSplitted := strings.Split(label, "=") + if len(labelSplitted) != 2 { + log.Fatalf("unknown runtime-label format: %s, use format \"foo=bar\"", label) + } + cliutil.ValidateRuntimeLabelKey(labelSplitted[0]) + runtimeLabels[labelSplitted[0]] = labelSplitted[1] + } + + // Internal k3d runtime labels take precedence over user-defined labels + runtimeLabels[k3d.LabelRole] = roleStr + + // --k3s-node-label k3sNodeLabelsFlag, err := cmd.Flags().GetStringSlice("k3s-node-label") if err != nil { - log.Errorln("No node-label specified") + log.Errorln("No k3s-node-label specified") log.Fatalln(err) } @@ -137,7 +160,7 @@ func parseCreateNodeCmd(cmd *cobra.Command, args []string) ([]*k3d.Node, *k3d.Cl for _, label := range k3sNodeLabelsFlag { labelSplitted := strings.Split(label, "=") if len(labelSplitted) != 2 { - log.Fatalf("unknown label format format: %s, use format \"foo=bar\"", label) + log.Fatalf("unknown k3s-node-label format: %s, use format \"foo=bar\"", label) } k3sNodeLabels[labelSplitted[0]] = labelSplitted[1] } @@ -146,13 +169,11 @@ func parseCreateNodeCmd(cmd *cobra.Command, args []string) ([]*k3d.Node, *k3d.Cl nodes := []*k3d.Node{} for i := 0; i < replicas; i++ { node := &k3d.Node{ - Name: fmt.Sprintf("%s-%s-%d", k3d.DefaultObjectNamePrefix, args[0], i), - Role: role, - Image: image, - Labels: map[string]string{ - k3d.LabelRole: roleStr, - }, + Name: fmt.Sprintf("%s-%s-%d", k3d.DefaultObjectNamePrefix, args[0], i), + Role: role, + Image: image, K3sNodeLabels: k3sNodeLabels, + RuntimeLabels: runtimeLabels, Restart: true, Memory: memory, } diff --git a/cmd/node/nodeList.go b/cmd/node/nodeList.go index fd0698f8..c4abde01 100644 --- a/cmd/node/nodeList.go +++ b/cmd/node/nodeList.go @@ -88,7 +88,7 @@ func NewCmdNodeList() *cobra.Command { fmt.Fprintf(tabwriter, "%s\t%s\t%s\t%s\n", strings.TrimPrefix(node.Name, "/"), - node.Labels[k3d.LabelClusterName], + node.RuntimeLabels[k3d.LabelClusterName],
node.State.Status) })) }, diff --git a/cmd/registry/registryList.go b/cmd/registry/registryList.go index f41d5f89..30fa66f0 100644 --- a/cmd/registry/registryList.go +++ b/cmd/registry/registryList.go @@ -88,8 +88,8 @@ func NewCmdRegistryList() *cobra.Command { util.PrintNodes(existingNodes, registryListFlags.output, headers, util.NodePrinterFunc(func(tabwriter *tabwriter.Writer, node *k3d.Node) { cluster := "*" - if _, ok := node.Labels[k3d.LabelClusterName]; ok { - cluster = node.Labels[k3d.LabelClusterName] + if _, ok := node.RuntimeLabels[k3d.LabelClusterName]; ok { + cluster = node.RuntimeLabels[k3d.LabelClusterName] } fmt.Fprintf(tabwriter, "%s\t%s\t%s\t%s\n", strings.TrimPrefix(node.Name, "/"), diff --git a/cmd/util/runtimeLabels.go b/cmd/util/runtimeLabels.go new file mode 100644 index 00000000..e603cef1 --- /dev/null +++ b/cmd/util/runtimeLabels.go @@ -0,0 +1,35 @@ +/* +Copyright © 2020 The k3d Author(s) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+*/ +package util + +import ( + "strings" + + log "github.com/sirupsen/logrus" +) + +// ValidateRuntimeLabelKey validates a given label key is not reserved for internal k3d usage +func ValidateRuntimeLabelKey(labelKey string) { + if strings.HasPrefix(labelKey, "k3s.") || strings.HasPrefix(labelKey, "k3d.") || labelKey == "app" { + log.Fatalf("runtime label \"%s\" is reserved for internal usage", labelKey) + } +} diff --git a/pkg/client/cluster.go b/pkg/client/cluster.go index 4aa29fcb..e04484c1 100644 --- a/pkg/client/cluster.go +++ b/pkg/client/cluster.go @@ -381,13 +381,13 @@ ClusterCreatOpts: nodeSetup := func(node *k3d.Node, suffix int) error { // cluster specific settings - if node.Labels == nil { - node.Labels = make(map[string]string) // TODO: maybe create an init function? + if node.RuntimeLabels == nil { + node.RuntimeLabels = make(map[string]string) // TODO: maybe create an init function? } // ensure global labels for k, v := range clusterCreateOpts.GlobalLabels { - node.Labels[k] = v + node.RuntimeLabels[k] = v } // ensure global env @@ -404,7 +404,7 @@ ClusterCreatOpts: cluster.Network.IPAM.IPsUsed = append(cluster.Network.IPAM.IPsUsed, ip) // make sure that we're not reusing the same IP next time node.IP.Static = true node.IP.IP = ip - node.Labels[k3d.LabelNodeStaticIP] = ip.String() + node.RuntimeLabels[k3d.LabelNodeStaticIP] = ip.String() } node.ServerOpts.KubeAPI = cluster.KubeAPI @@ -412,7 +412,7 @@ ClusterCreatOpts: // the cluster has an init server node, but its not this one, so connect it to the init node if cluster.InitNode != nil && !node.ServerOpts.IsInit { node.Env = append(node.Env, fmt.Sprintf("K3S_URL=%s", connectionURL)) - node.Labels[k3d.LabelServerIsInit] = "false" // set label, that this server node is not the init server + node.RuntimeLabels[k3d.LabelServerIsInit] = "false" // set label, that this server node is not the init server } } else if node.Role == k3d.AgentRole { @@ -446,10 +446,10 @@ ClusterCreatOpts: if cluster.InitNode !=
nil { log.Infoln("Creating initializing server node") cluster.InitNode.Args = append(cluster.InitNode.Args, "--cluster-init") - if cluster.InitNode.Labels == nil { - cluster.InitNode.Labels = map[string]string{} + if cluster.InitNode.RuntimeLabels == nil { + cluster.InitNode.RuntimeLabels = map[string]string{} } - cluster.InitNode.Labels[k3d.LabelServerIsInit] = "true" // set label, that this server node is the init server + cluster.InitNode.RuntimeLabels[k3d.LabelServerIsInit] = "true" // set label, that this server node is the init server // in case the LoadBalancer was disabled, expose the API Port on the initializing server node if clusterCreateOpts.DisableLoadBalancer { @@ -547,10 +547,10 @@ ClusterCreatOpts: fmt.Sprintf("PORTS=%s", strings.Join(ports, ",")), fmt.Sprintf("WORKER_PROCESSES=%d", len(ports)), }, - Role: k3d.LoadBalancerRole, - Labels: clusterCreateOpts.GlobalLabels, // TODO: createLoadBalancer: add more expressive labels - Networks: []string{cluster.Network.Name}, - Restart: true, + Role: k3d.LoadBalancerRole, + RuntimeLabels: clusterCreateOpts.GlobalLabels, // TODO: createLoadBalancer: add more expressive labels + Networks: []string{cluster.Network.Name}, + Restart: true, } if len(udp_ports) > 0 { lbNode.Env = append(lbNode.Env, fmt.Sprintf("UDP_PORTS=%s", strings.Join(udp_ports, ","))) @@ -673,7 +673,7 @@ func ClusterDelete(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus // ClusterList returns a list of all existing clusters func ClusterList(ctx context.Context, runtime k3drt.Runtime) ([]*k3d.Cluster, error) { log.Traceln("Listing Clusters...") - nodes, err := runtime.GetNodesByLabel(ctx, k3d.DefaultObjectLabels) + nodes, err := runtime.GetNodesByLabel(ctx, k3d.DefaultRuntimeLabels) if err != nil { log.Errorln("Failed to get clusters") return nil, err @@ -691,7 +691,7 @@ func ClusterList(ctx context.Context, runtime k3drt.Runtime) ([]*k3d.Cluster, er log.Tracef("Found %d cluster-internal nodes", len(nodes)) if log.GetLevel() == 
log.TraceLevel { for _, node := range nodes { - log.Tracef("Found cluster-internal node %s of role %s belonging to cluster %s", node.Name, node.Role, node.Labels[k3d.LabelClusterName]) + log.Tracef("Found cluster-internal node %s of role %s belonging to cluster %s", node.Name, node.Role, node.RuntimeLabels[k3d.LabelClusterName]) } } @@ -700,7 +700,7 @@ func ClusterList(ctx context.Context, runtime k3drt.Runtime) ([]*k3d.Cluster, er for _, node := range nodes { clusterExists := false for _, cluster := range clusters { - if node.Labels[k3d.LabelClusterName] == cluster.Name { // TODO: handle case, where this label doesn't exist + if node.RuntimeLabels[k3d.LabelClusterName] == cluster.Name { // TODO: handle case, where this label doesn't exist cluster.Nodes = append(cluster.Nodes, node) clusterExists = true break @@ -709,7 +709,7 @@ func ClusterList(ctx context.Context, runtime k3drt.Runtime) ([]*k3d.Cluster, er // cluster is not in the list yet, so we add it with the current node as its first member if !clusterExists { clusters = append(clusters, &k3d.Cluster{ - Name: node.Labels[k3d.LabelClusterName], + Name: node.RuntimeLabels[k3d.LabelClusterName], Nodes: []*k3d.Node{node}, }) } @@ -734,7 +734,7 @@ func populateClusterFieldsFromLabels(cluster *k3d.Cluster) error { // get the name of the cluster network if cluster.Network.Name == "" { - if networkName, ok := node.Labels[k3d.LabelNetwork]; ok { + if networkName, ok := node.RuntimeLabels[k3d.LabelNetwork]; ok { cluster.Network.Name = networkName } } @@ -742,7 +742,7 @@ func populateClusterFieldsFromLabels(cluster *k3d.Cluster) error { // check if the network is external // since the struct value is a bool, initialized as false, we cannot check if it's unset if !cluster.Network.External && !networkExternalSet { - if networkExternalString, ok := node.Labels[k3d.LabelNetworkExternal]; ok { + if networkExternalString, ok := node.RuntimeLabels[k3d.LabelNetworkExternal]; ok { if networkExternal, err := 
strconv.ParseBool(networkExternalString); err == nil { cluster.Network.External = networkExternal networkExternalSet = true @@ -752,14 +752,14 @@ func populateClusterFieldsFromLabels(cluster *k3d.Cluster) error { // get image volume // TODO: enable external image volumes the same way we do it with networks if cluster.ImageVolume == "" { - if imageVolumeName, ok := node.Labels[k3d.LabelImageVolume]; ok { + if imageVolumeName, ok := node.RuntimeLabels[k3d.LabelImageVolume]; ok { cluster.ImageVolume = imageVolumeName } } // get k3s cluster's token if cluster.Token == "" { - if token, ok := node.Labels[k3d.LabelClusterToken]; ok { + if token, ok := node.RuntimeLabels[k3d.LabelClusterToken]; ok { cluster.Token = token } } diff --git a/pkg/client/kubeconfig.go b/pkg/client/kubeconfig.go index 957f593d..a016f153 100644 --- a/pkg/client/kubeconfig.go +++ b/pkg/client/kubeconfig.go @@ -131,11 +131,11 @@ func KubeconfigGet(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.C APIHost := k3d.DefaultAPIHost for _, server := range serverNodes { - if _, ok := server.Labels[k3d.LabelServerAPIPort]; ok { + if _, ok := server.RuntimeLabels[k3d.LabelServerAPIPort]; ok { chosenServer = server - APIPort = server.Labels[k3d.LabelServerAPIPort] - if _, ok := server.Labels[k3d.LabelServerAPIHost]; ok { - APIHost = server.Labels[k3d.LabelServerAPIHost] + APIPort = server.RuntimeLabels[k3d.LabelServerAPIPort] + if _, ok := server.RuntimeLabels[k3d.LabelServerAPIHost]; ok { + APIHost = server.RuntimeLabels[k3d.LabelServerAPIHost] } break } diff --git a/pkg/client/node.go b/pkg/client/node.go index ebbf28f0..a00f4666 100644 --- a/pkg/client/node.go +++ b/pkg/client/node.go @@ -59,8 +59,8 @@ func NodeAddToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.N node.Networks = []string{cluster.Network.Name} // skeleton - if node.Labels == nil { - node.Labels = map[string]string{ + if node.RuntimeLabels == nil { + node.RuntimeLabels = map[string]string{ k3d.LabelRole: 
string(node.Role), } } @@ -141,7 +141,7 @@ func NodeAddToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.N } } if !k3sURLFound { - if url, ok := node.Labels[k3d.LabelClusterURL]; ok { + if url, ok := node.RuntimeLabels[k3d.LabelClusterURL]; ok { node.Env = append(node.Env, fmt.Sprintf("K3S_URL=%s", url)) } else { log.Warnln("Failed to find K3S_URL value!") @@ -381,18 +381,22 @@ func NodeCreate(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, c // ### Labels ### labels := make(map[string]string) - for k, v := range k3d.DefaultObjectLabels { + for k, v := range k3d.DefaultRuntimeLabels { labels[k] = v } - for k, v := range k3d.DefaultObjectLabelsVar { + for k, v := range k3d.DefaultRuntimeLabelsVar { labels[k] = v } - for k, v := range node.Labels { + for k, v := range node.RuntimeLabels { labels[k] = v } - node.Labels = labels + node.RuntimeLabels = labels // second most important: the node role label - node.Labels[k3d.LabelRole] = string(node.Role) + node.RuntimeLabels[k3d.LabelRole] = string(node.Role) + + for k, v := range node.K3sNodeLabels { + node.Args = append(node.Args, "--node-label", fmt.Sprintf("%s=%s", k, v)) + } // ### Environment ### node.Env = append(node.Env, k3d.DefaultNodeEnv...) 
// append default node env vars @@ -469,7 +473,7 @@ func NodeDelete(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, o // update the server loadbalancer if !opts.SkipLBUpdate && (node.Role == k3d.ServerRole || node.Role == k3d.AgentRole) { - cluster, err := ClusterGet(ctx, runtime, &k3d.Cluster{Name: node.Labels[k3d.LabelClusterName]}) + cluster, err := ClusterGet(ctx, runtime, &k3d.Cluster{Name: node.RuntimeLabels[k3d.LabelClusterName]}) if err != nil { log.Errorf("Failed to find cluster for node '%s'", node.Name) return err @@ -493,10 +497,6 @@ func patchAgentSpec(node *k3d.Node) error { node.Cmd = []string{"agent"} } - for k, v := range node.K3sNodeLabels { - node.Args = append(node.Args, "--node-label", fmt.Sprintf("%s=%s", k, v)) - } - return nil } @@ -509,9 +509,9 @@ func patchServerSpec(node *k3d.Node, runtime runtimes.Runtime) error { // Add labels and TLS SAN for the exposed API // FIXME: For now, the labels concerning the API on the server nodes are only being used for configuring the kubeconfig - node.Labels[k3d.LabelServerAPIHostIP] = node.ServerOpts.KubeAPI.Binding.HostIP // TODO: maybe get docker machine IP here - node.Labels[k3d.LabelServerAPIHost] = node.ServerOpts.KubeAPI.Host - node.Labels[k3d.LabelServerAPIPort] = node.ServerOpts.KubeAPI.Binding.HostPort + node.RuntimeLabels[k3d.LabelServerAPIHostIP] = node.ServerOpts.KubeAPI.Binding.HostIP // TODO: maybe get docker machine IP here + node.RuntimeLabels[k3d.LabelServerAPIHost] = node.ServerOpts.KubeAPI.Host + node.RuntimeLabels[k3d.LabelServerAPIPort] = node.ServerOpts.KubeAPI.Binding.HostPort // If the runtime is docker, attempt to use the docker host if runtime == runtimes.Docker { @@ -519,19 +519,19 @@ func patchServerSpec(node *k3d.Node, runtime runtimes.Runtime) error { if dockerHost != "" { dockerHost = strings.Split(dockerHost, ":")[0] // remove the port log.Tracef("Using docker host %s", dockerHost) - node.Labels[k3d.LabelServerAPIHostIP] = dockerHost - 
node.Labels[k3d.LabelServerAPIHost] = dockerHost + node.RuntimeLabels[k3d.LabelServerAPIHostIP] = dockerHost + node.RuntimeLabels[k3d.LabelServerAPIHost] = dockerHost } } - node.Args = append(node.Args, "--tls-san", node.Labels[k3d.LabelServerAPIHost]) // add TLS SAN for non default host name + node.Args = append(node.Args, "--tls-san", node.RuntimeLabels[k3d.LabelServerAPIHost]) // add TLS SAN for non default host name return nil } // NodeList returns a list of all existing clusters func NodeList(ctx context.Context, runtime runtimes.Runtime) ([]*k3d.Node, error) { - nodes, err := runtime.GetNodesByLabel(ctx, k3d.DefaultObjectLabels) + nodes, err := runtime.GetNodesByLabel(ctx, k3d.DefaultRuntimeLabels) if err != nil { log.Errorln("Failed to get nodes") return nil, err diff --git a/pkg/client/registry.go b/pkg/client/registry.go index 4df7ff7a..9e1dc86c 100644 --- a/pkg/client/registry.go +++ b/pkg/client/registry.go @@ -77,7 +77,7 @@ func RegistryCreate(ctx context.Context, runtime runtimes.Runtime, reg *k3d.Regi } // setup the node labels - registryNode.Labels = map[string]string{ + registryNode.RuntimeLabels = map[string]string{ k3d.LabelClusterName: reg.ClusterRef, k3d.LabelRole: string(k3d.RegistryRole), k3d.LabelRegistryHost: reg.ExposureOpts.Host, // TODO: docker machine host? 
@@ -85,11 +85,11 @@ func RegistryCreate(ctx context.Context, runtime runtimes.Runtime, reg *k3d.Regi k3d.LabelRegistryPortExternal: reg.ExposureOpts.Binding.HostPort, k3d.LabelRegistryPortInternal: reg.ExposureOpts.Port.Port(), } - for k, v := range k3d.DefaultObjectLabels { - registryNode.Labels[k] = v + for k, v := range k3d.DefaultRuntimeLabels { + registryNode.RuntimeLabels[k] = v } - for k, v := range k3d.DefaultObjectLabelsVar { - registryNode.Labels[k] = v + for k, v := range k3d.DefaultRuntimeLabelsVar { + registryNode.RuntimeLabels[k] = v } // port diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go index eb15f152..41d76422 100644 --- a/pkg/config/config_test.go +++ b/pkg/config/config_test.go @@ -64,12 +64,6 @@ func TestReadSimpleConfig(t *testing.T) { NodeFilters: []string{"loadbalancer"}, }, }, - Labels: []conf.LabelWithNodeFilters{ - { - Label: "foo=bar", - NodeFilters: []string{"server[0]", "loadbalancer"}, - }, - }, Env: []conf.EnvVarWithNodeFilters{ { EnvVar: "bar=baz", @@ -90,11 +84,25 @@ func TestReadSimpleConfig(t *testing.T) { NodeFilters: []string{"server[*]"}, }, }, + NodeLabels: []conf.LabelWithNodeFilters{ + { + Label: "foo=bar", + NodeFilters: []string{"server[0]", "loadbalancer"}, + }, + }, }, KubeconfigOptions: conf.SimpleConfigOptionsKubeconfig{ UpdateDefaultKubeconfig: true, SwitchCurrentContext: true, }, + Runtime: conf.SimpleConfigOptionsRuntime{ + Labels: []conf.LabelWithNodeFilters{ + { + Label: "foo=bar", + NodeFilters: []string{"server[0]", "loadbalancer"}, + }, + }, + }, }, } diff --git a/pkg/config/test_assets/config_test_simple.yaml b/pkg/config/test_assets/config_test_simple.yaml index 4e132176..f8f873cb 100644 --- a/pkg/config/test_assets/config_test_simple.yaml +++ b/pkg/config/test_assets/config_test_simple.yaml @@ -22,11 +22,6 @@ env: - envVar: bar=baz nodeFilters: - all -labels: - - label: foo=bar - nodeFilters: - - "server[0]" - - loadbalancer options: k3d: @@ -39,6 +34,17 @@ options: - arg: 
--tls-san=127.0.0.1 nodeFilters: - "server[*]" + nodeLabels: + - label: foo=bar + nodeFilters: + - server[0] + - loadbalancer kubeconfig: updateDefaultKubeconfig: true switchCurrentContext: true + runtime: + labels: + - label: foo=bar + nodeFilters: + - server[0] + - loadbalancer diff --git a/pkg/config/test_assets/config_test_simple_invalid_servers.yaml b/pkg/config/test_assets/config_test_simple_invalid_servers.yaml index b9e75fb6..7b9bc8a0 100644 --- a/pkg/config/test_assets/config_test_simple_invalid_servers.yaml +++ b/pkg/config/test_assets/config_test_simple_invalid_servers.yaml @@ -22,11 +22,6 @@ env: - envVar: bar=baz nodeFilters: - all -labels: - - label: foo=bar - nodeFilters: - - server[0] - - loadbalancer options: k3d: @@ -39,6 +34,17 @@ options: - arg: --tls-san=127.0.0.1 nodeFilters: - "server[*]" + nodeLabels: + - label: foo=bar + nodeFilters: + - server[0] + - loadbalancer kubeconfig: updateDefaultKubeconfig: true - switchCurrentContext: true \ No newline at end of file + switchCurrentContext: true + runtime: + labels: + - label: foo=bar + nodeFilters: + - server[0] + - loadbalancer diff --git a/pkg/config/transform.go b/pkg/config/transform.go index 0ec43686..59e674d9 100644 --- a/pkg/config/transform.go +++ b/pkg/config/transform.go @@ -190,23 +190,47 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim } } - // -> LABELS - for _, labelWithNodeFilters := range simpleConfig.Labels { - if len(labelWithNodeFilters.NodeFilters) == 0 && nodeCount > 1 { - return nil, fmt.Errorf("Labelmapping '%s' lacks a node filter, but there's more than one node", labelWithNodeFilters.Label) + // -> K3S NODE LABELS + for _, k3sNodeLabelWithNodeFilters := range simpleConfig.Options.K3sOptions.NodeLabels { + if len(k3sNodeLabelWithNodeFilters.NodeFilters) == 0 && nodeCount > 1 { + return nil, fmt.Errorf("K3sNodeLabelmapping '%s' lacks a node filter, but there's more than one node", k3sNodeLabelWithNodeFilters.Label) } - nodes, err := 
util.FilterNodes(nodeList, labelWithNodeFilters.NodeFilters) + nodes, err := util.FilterNodes(nodeList, k3sNodeLabelWithNodeFilters.NodeFilters) if err != nil { return nil, err } for _, node := range nodes { - if node.Labels == nil { - node.Labels = make(map[string]string) // ensure that the map is initialized + if node.K3sNodeLabels == nil { + node.K3sNodeLabels = make(map[string]string) // ensure that the map is initialized } - k, v := util.SplitLabelKeyValue(labelWithNodeFilters.Label) - node.Labels[k] = v + k, v := util.SplitLabelKeyValue(k3sNodeLabelWithNodeFilters.Label) + node.K3sNodeLabels[k] = v + + } + } + + // -> RUNTIME LABELS + for _, runtimeLabelWithNodeFilters := range simpleConfig.Options.Runtime.Labels { + if len(runtimeLabelWithNodeFilters.NodeFilters) == 0 && nodeCount > 1 { + return nil, fmt.Errorf("RuntimeLabelmapping '%s' lacks a node filter, but there's more than one node", runtimeLabelWithNodeFilters.Label) + } + + nodes, err := util.FilterNodes(nodeList, runtimeLabelWithNodeFilters.NodeFilters) + if err != nil { + return nil, err + } + + for _, node := range nodes { + if node.RuntimeLabels == nil { + node.RuntimeLabels = make(map[string]string) // ensure that the map is initialized + } + k, v := util.SplitLabelKeyValue(runtimeLabelWithNodeFilters.Label) + + cliutil.ValidateRuntimeLabelKey(k) + + node.RuntimeLabels[k] = v } } @@ -260,7 +284,7 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim } // ensure, that we have the default object labels - for k, v := range k3d.DefaultObjectLabels { + for k, v := range k3d.DefaultRuntimeLabels { clusterCreateOpts.GlobalLabels[k] = v } diff --git a/pkg/config/v1alpha3/migrations.go b/pkg/config/v1alpha3/migrations.go index 5c894ca1..fd1bc6dc 100644 --- a/pkg/config/v1alpha3/migrations.go +++ b/pkg/config/v1alpha3/migrations.go @@ -49,6 +49,15 @@ func MigrateV1Alpha2(input configtypes.Config) (configtypes.Config, error) { return nil, err } + cfg.Options.Runtime.Labels = 
[]LabelWithNodeFilters{} + + for _, label := range input.(v1alpha2.SimpleConfig).Labels { + cfg.Options.Runtime.Labels = append(cfg.Options.Runtime.Labels, LabelWithNodeFilters{ + Label: label.Label, + NodeFilters: label.NodeFilters, + }) + } + cfg.Options.K3sOptions.ExtraArgs = []K3sArgWithNodeFilters{} for _, arg := range input.(v1alpha2.SimpleConfig).Options.K3sOptions.ExtraServerArgs { diff --git a/pkg/config/v1alpha3/schema.json b/pkg/config/v1alpha3/schema.json index 2b07c02c..1deb0060 100644 --- a/pkg/config/v1alpha3/schema.json +++ b/pkg/config/v1alpha3/schema.json @@ -108,21 +108,6 @@ "additionalProperties": false } }, - "labels": { - "type": "array", - "items": { - "type": "object", - "properties": { - "label": { - "type": "string" - }, - "nodeFilters": { - "$ref": "#/definitions/nodeFilters" - } - }, - "additionalProperties": false - } - }, "options": { "type": "object", "properties": { @@ -170,12 +155,24 @@ "properties": { "arg": { "type": "string", - "examples": [ - "--tls-san=127.0.0.1", - "--disable=traefik" - ] + "examples": ["--tls-san=127.0.0.1", "--disable=traefik"] }, - "nodeFilters": { + "nodeFilters": { + "$ref": "#/definitions/nodeFilters" + } + }, + "additionalProperties": false + } + }, + "nodeLabels": { + "type": "array", + "items": { + "type": "object", + "properties": { + "label": { + "type": "string" + }, + "nodeFilters": { "$ref": "#/definitions/nodeFilters" } }, @@ -210,6 +207,21 @@ }, "agentsMemory": { "type": "string" + }, + "labels": { + "type": "array", + "items": { + "type": "object", + "properties": { + "label": { + "type": "string" + }, + "nodeFilters": { + "$ref": "#/definitions/nodeFilters" + } + }, + "additionalProperties": false + } } } } diff --git a/pkg/config/v1alpha3/types.go b/pkg/config/v1alpha3/types.go index 22465c71..9783d1c1 100644 --- a/pkg/config/v1alpha3/types.go +++ b/pkg/config/v1alpha3/types.go @@ -95,9 +95,10 @@ type SimpleConfigOptions struct { } type SimpleConfigOptionsRuntime struct { - GPURequest string 
`mapstructure:"gpuRequest" yaml:"gpuRequest"` - ServersMemory string `mapstructure:"serversMemory" yaml:"serversMemory"` - AgentsMemory string `mapstructure:"agentsMemory" yaml:"agentsMemory"` + GPURequest string `mapstructure:"gpuRequest" yaml:"gpuRequest"` + ServersMemory string `mapstructure:"serversMemory" yaml:"serversMemory"` + AgentsMemory string `mapstructure:"agentsMemory" yaml:"agentsMemory"` + Labels []LabelWithNodeFilters `mapstructure:"labels" yaml:"labels"` } type SimpleConfigOptionsK3d struct { @@ -111,7 +112,8 @@ type SimpleConfigOptionsK3d struct { } type SimpleConfigOptionsK3s struct { - ExtraArgs []K3sArgWithNodeFilters `mapstructure:"extraArgs" yaml:"extraArgs"` + ExtraArgs []K3sArgWithNodeFilters `mapstructure:"extraArgs" yaml:"extraArgs"` + NodeLabels []LabelWithNodeFilters `mapstructure:"nodeLabels" yaml:"nodeLabels"` } // SimpleConfig describes the toplevel k3d configuration file. @@ -127,7 +129,6 @@ type SimpleConfig struct { ClusterToken string `mapstructure:"token" yaml:"clusterToken" json:"clusterToken,omitempty"` // default: auto-generated Volumes []VolumeWithNodeFilters `mapstructure:"volumes" yaml:"volumes" json:"volumes,omitempty"` Ports []PortWithNodeFilters `mapstructure:"ports" yaml:"ports" json:"ports,omitempty"` - Labels []LabelWithNodeFilters `mapstructure:"labels" yaml:"labels" json:"labels,omitempty"` Options SimpleConfigOptions `mapstructure:"options" yaml:"options" json:"options,omitempty"` Env []EnvVarWithNodeFilters `mapstructure:"env" yaml:"env" json:"env,omitempty"` Registries struct { diff --git a/pkg/runtimes/docker/container.go b/pkg/runtimes/docker/container.go index 6124d4e3..296c3070 100644 --- a/pkg/runtimes/docker/container.go +++ b/pkg/runtimes/docker/container.go @@ -151,7 +151,7 @@ func getNodeContainer(ctx context.Context, node *k3d.Node) (*types.Container, er // (1) list containers which have the default k3d labels attached filters := filters.NewArgs() - for k, v := range node.Labels { + for k, v := range 
node.RuntimeLabels { filters.Add("label", fmt.Sprintf("%s=%s", k, v)) } diff --git a/pkg/runtimes/docker/network.go b/pkg/runtimes/docker/network.go index 80f62f4c..b1661233 100644 --- a/pkg/runtimes/docker/network.go +++ b/pkg/runtimes/docker/network.go @@ -147,7 +147,7 @@ func (d Docker) CreateNetworkIfNotPresent(ctx context.Context, inNet *k3d.Cluste // (3) Create a new network netCreateOpts := types.NetworkCreate{ CheckDuplicate: true, - Labels: k3d.DefaultObjectLabels, + Labels: k3d.DefaultRuntimeLabels, } // we want a managed (user-defined) network, but user didn't specify a subnet, so we try to auto-generate one diff --git a/pkg/runtimes/docker/node.go b/pkg/runtimes/docker/node.go index b5e3b39a..c7c3b37d 100644 --- a/pkg/runtimes/docker/node.go +++ b/pkg/runtimes/docker/node.go @@ -178,7 +178,7 @@ func getContainersByLabel(ctx context.Context, labels map[string]string) ([]type // (1) list containers which have the default k3d labels attached filters := filters.NewArgs() - for k, v := range k3d.DefaultObjectLabels { + for k, v := range k3d.DefaultRuntimeLabels { filters.Add("label", fmt.Sprintf("%s=%s", k, v)) } for k, v := range labels { diff --git a/pkg/runtimes/docker/translate.go b/pkg/runtimes/docker/translate.go index b6861aad..c8331c75 100644 --- a/pkg/runtimes/docker/translate.go +++ b/pkg/runtimes/docker/translate.go @@ -73,7 +73,7 @@ func TranslateNodeToContainer(node *k3d.Node) (*NodeInDocker, error) { containerConfig.Env = node.Env /* Labels */ - containerConfig.Labels = node.Labels // has to include the role + containerConfig.Labels = node.RuntimeLabels // has to include the role /* Auto-Restart */ if node.Restart { @@ -162,10 +162,10 @@ func TranslateNodeToContainer(node *k3d.Node) (*NodeInDocker, error) { // TranslateContainerToNode translates a docker container object into a k3d node representation func TranslateContainerToNode(cont *types.Container) (*k3d.Node, error) { node := &k3d.Node{ - Name: strings.TrimPrefix(cont.Names[0], "/"), // 
container name with leading '/' cut off - Image: cont.Image, - Labels: cont.Labels, - Role: k3d.NodeRoles[cont.Labels[k3d.LabelRole]], + Name: strings.TrimPrefix(cont.Names[0], "/"), // container name with leading '/' cut off + Image: cont.Image, + RuntimeLabels: cont.Labels, + Role: k3d.NodeRoles[cont.Labels[k3d.LabelRole]], // TODO: all the rest } return node, nil @@ -175,7 +175,7 @@ func TranslateContainerToNode(cont *types.Container) (*k3d.Node, error) { func TranslateContainerDetailsToNode(containerDetails types.ContainerJSON) (*k3d.Node, error) { // first, make sure, that it's actually a k3d managed container by checking if it has all the default labels - for k, v := range k3d.DefaultObjectLabels { + for k, v := range k3d.DefaultRuntimeLabels { log.Tracef("TranslateContainerDetailsToNode: Checking for default object label %s=%s on container %s", k, v, containerDetails.Name) found := false for lk, lv := range containerDetails.Config.Labels { @@ -273,22 +273,22 @@ func TranslateContainerDetailsToNode(containerDetails types.ContainerJSON) (*k3d } node := &k3d.Node{ - Name: strings.TrimPrefix(containerDetails.Name, "/"), // container name with leading '/' cut off - Role: k3d.NodeRoles[containerDetails.Config.Labels[k3d.LabelRole]], - Image: containerDetails.Image, - Volumes: containerDetails.HostConfig.Binds, - Env: env, - Cmd: containerDetails.Config.Cmd, - Args: []string{}, // empty, since Cmd already contains flags - Ports: containerDetails.HostConfig.PortBindings, - Restart: restart, - Created: containerDetails.Created, - Labels: labels, - Networks: orderedNetworks, - ServerOpts: serverOpts, - AgentOpts: k3d.AgentOpts{}, - State: nodeState, - Memory: memoryStr, + Name: strings.TrimPrefix(containerDetails.Name, "/"), // container name with leading '/' cut off + Role: k3d.NodeRoles[containerDetails.Config.Labels[k3d.LabelRole]], + Image: containerDetails.Image, + Volumes: containerDetails.HostConfig.Binds, + Env: env, + Cmd: containerDetails.Config.Cmd, + Args: 
[]string{}, // empty, since Cmd already contains flags + Ports: containerDetails.HostConfig.PortBindings, + Restart: restart, + Created: containerDetails.Created, + RuntimeLabels: labels, + Networks: orderedNetworks, + ServerOpts: serverOpts, + AgentOpts: k3d.AgentOpts{}, + State: nodeState, + Memory: memoryStr, } return node, nil } diff --git a/pkg/runtimes/docker/translate_test.go b/pkg/runtimes/docker/translate_test.go index 38c6f3a2..4243dc57 100644 --- a/pkg/runtimes/docker/translate_test.go +++ b/pkg/runtimes/docker/translate_test.go @@ -52,9 +52,9 @@ func TestTranslateNodeToContainer(t *testing.T) { }, }, }, - Restart: true, - Labels: map[string]string{k3d.LabelRole: string(k3d.ServerRole), "test_key_1": "test_val_1"}, - Networks: []string{"mynet"}, + Restart: true, + RuntimeLabels: map[string]string{k3d.LabelRole: string(k3d.ServerRole), "test_key_1": "test_val_1"}, + Networks: []string{"mynet"}, } init := true diff --git a/pkg/runtimes/docker/util.go b/pkg/runtimes/docker/util.go index 7720c1e5..f64c39a2 100644 --- a/pkg/runtimes/docker/util.go +++ b/pkg/runtimes/docker/util.go @@ -44,7 +44,7 @@ import ( // GetDefaultObjectLabelsFilter returns docker type filters created from k3d labels func GetDefaultObjectLabelsFilter(clusterName string) filters.Args { filters := filters.NewArgs() - for key, value := range k3d.DefaultObjectLabels { + for key, value := range k3d.DefaultRuntimeLabels { filters.Add("label", fmt.Sprintf("%s=%s", key, value)) } filters.Add("label", fmt.Sprintf("%s=%s", k3d.LabelClusterName, clusterName)) diff --git a/pkg/runtimes/docker/volume.go b/pkg/runtimes/docker/volume.go index 6c0f4c80..32d39634 100644 --- a/pkg/runtimes/docker/volume.go +++ b/pkg/runtimes/docker/volume.go @@ -49,10 +49,10 @@ func (d Docker) CreateVolume(ctx context.Context, name string, labels map[string DriverOpts: map[string]string{}, } - for k, v := range k3d.DefaultObjectLabels { + for k, v := range k3d.DefaultRuntimeLabels { volumeCreateOptions.Labels[k] = v } - 
for k, v := range k3d.DefaultObjectLabelsVar { + for k, v := range k3d.DefaultRuntimeLabelsVar { volumeCreateOptions.Labels[k] = v } diff --git a/pkg/tools/tools.go b/pkg/tools/tools.go index 8cf728a0..032e72d2 100644 --- a/pkg/tools/tools.go +++ b/pkg/tools/tools.go @@ -65,7 +65,7 @@ func ImageImportIntoClusterMulti(ctx context.Context, runtime runtimes.Runtime, var ok bool for _, node := range cluster.Nodes { if node.Role == k3d.ServerRole || node.Role == k3d.AgentRole { - if imageVolume, ok = node.Labels[k3d.LabelImageVolume]; ok { + if imageVolume, ok = node.RuntimeLabels[k3d.LabelImageVolume]; ok { break } } @@ -269,23 +269,23 @@ func containsVersionPart(imageTag string) bool { // startToolsNode will start a new k3d tools container and connect it to the network of the chosen cluster func startToolsNode(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.Cluster, network string, volumes []string) (*k3d.Node, error) { labels := map[string]string{} - for k, v := range k3d.DefaultObjectLabels { + for k, v := range k3d.DefaultRuntimeLabels { labels[k] = v } - for k, v := range k3d.DefaultObjectLabelsVar { + for k, v := range k3d.DefaultRuntimeLabelsVar { labels[k] = v } node := &k3d.Node{ - Name: fmt.Sprintf("%s-%s-tools", k3d.DefaultObjectNamePrefix, cluster.Name), - Image: fmt.Sprintf("%s:%s", k3d.DefaultToolsImageRepo, version.GetHelperImageVersion()), - Role: k3d.NoRole, - Volumes: volumes, - Networks: []string{network}, - Cmd: []string{}, - Args: []string{"noop"}, - Labels: k3d.DefaultObjectLabels, + Name: fmt.Sprintf("%s-%s-tools", k3d.DefaultObjectNamePrefix, cluster.Name), + Image: fmt.Sprintf("%s:%s", k3d.DefaultToolsImageRepo, version.GetHelperImageVersion()), + Role: k3d.NoRole, + Volumes: volumes, + Networks: []string{network}, + Cmd: []string{}, + Args: []string{"noop"}, + RuntimeLabels: k3d.DefaultRuntimeLabels, } - node.Labels[k3d.LabelClusterName] = cluster.Name + node.RuntimeLabels[k3d.LabelClusterName] = cluster.Name if err := 
k3dc.NodeRun(ctx, runtime, node, k3d.NodeCreateOpts{}); err != nil { log.Errorf("Failed to create tools container for cluster '%s'", cluster.Name) return node, err diff --git a/pkg/types/types.go b/pkg/types/types.go index e31141ec..3d5fa830 100644 --- a/pkg/types/types.go +++ b/pkg/types/types.go @@ -105,13 +105,13 @@ var ClusterExternalNodeRoles = []Role{ RegistryRole, } -// DefaultObjectLabels specifies a set of labels that will be attached to k3d objects by default -var DefaultObjectLabels = map[string]string{ +// DefaultRuntimeLabels specifies a set of labels that will be attached to k3d runtime objects by default +var DefaultRuntimeLabels = map[string]string{ "app": "k3d", } -// DefaultObjectLabelsVar specifies a set of labels that will be attached to k3d objects by default but are not static (e.g. across k3d versions) -var DefaultObjectLabelsVar = map[string]string{ +// DefaultRuntimeLabelsVar specifies a set of labels that will be attached to k3d runtime objects by default but are not static (e.g. 
across k3d versions) +var DefaultRuntimeLabelsVar = map[string]string{ "k3d.version": version.GetVersion(), } @@ -338,7 +338,7 @@ type Node struct { Ports nat.PortMap `yaml:"portMappings" json:"portMappings,omitempty"` Restart bool `yaml:"restart" json:"restart,omitempty"` Created string `yaml:"created" json:"created,omitempty"` - Labels map[string]string // filled automatically + RuntimeLabels map[string]string `yaml:"runtimeLabels" json:"runtimeLabels,omitempty"` K3sNodeLabels map[string]string `yaml:"k3sNodeLabels" json:"k3sNodeLabels,omitempty"` Networks []string // filled automatically ExtraHosts []string // filled automatically diff --git a/tests/assets/config_test_simple.yaml b/tests/assets/config_test_simple.yaml index c1d05e2b..75588924 100755 --- a/tests/assets/config_test_simple.yaml +++ b/tests/assets/config_test_simple.yaml @@ -22,11 +22,6 @@ env: - envVar: bar=baz,bob nodeFilters: - all -labels: - - label: foo=bar - nodeFilters: - - server[0] - - loadbalancer registries: create: true use: [] @@ -47,6 +42,17 @@ options: - arg: --tls-san=127.0.0.1 nodeFilters: - server[*] + nodeLabels: + - label: foo=bar + nodeFilters: + - server[0] + - loadbalancer kubeconfig: updateDefaultKubeconfig: true - switchCurrentContext: true \ No newline at end of file + switchCurrentContext: true + runtime: + labels: + - label: foo=bar + nodeFilters: + - server[0] + - loadbalancer diff --git a/tests/assets/config_test_simple_migration_v1alpha3.yaml b/tests/assets/config_test_simple_migration_v1alpha3.yaml index c1d05e2b..98eccbde 100755 --- a/tests/assets/config_test_simple_migration_v1alpha3.yaml +++ b/tests/assets/config_test_simple_migration_v1alpha3.yaml @@ -22,11 +22,6 @@ env: - envVar: bar=baz,bob nodeFilters: - all -labels: - - label: foo=bar - nodeFilters: - - server[0] - - loadbalancer registries: create: true use: [] @@ -49,4 +44,10 @@ options: - server[*] kubeconfig: updateDefaultKubeconfig: true - switchCurrentContext: true \ No newline at end of file + 
switchCurrentContext: true + runtime: + labels: + - label: foo=bar + nodeFilters: + - server[0] + - loadbalancer diff --git a/tests/common.sh b/tests/common.sh index f1529fbb..4d4de061 100755 --- a/tests/common.sh +++ b/tests/common.sh @@ -175,4 +175,10 @@ docker_assert_container_label() { # $1 = container/node name # $2 = label to assert docker inspect --format '{{ range $k, $v := .Config.Labels }}{{ printf "%s=%s\n" $k $v }}{{ end }}' "$1" | grep -E "^$2$" +} + +k3s_assert_node_label() { + # $1 = node name + # $2 = label to assert + kubectl get node "$1" --output go-template='{{ range $k, $v := .metadata.labels }}{{ printf "%s=%s\n" $k $v }}{{ end }}' | grep -E "^$2$" } \ No newline at end of file diff --git a/tests/test_config_file.sh b/tests/test_config_file.sh index b367bac1..8793eb44 100755 --- a/tests/test_config_file.sh +++ b/tests/test_config_file.sh @@ -18,7 +18,7 @@ fi export CURRENT_STAGE="Test | config-file | $K3S_IMAGE_TAG" -clustername="ConfigTest" +clustername="configtest" highlight "[START] ConfigTest $EXTRA_TITLE" @@ -45,6 +45,10 @@ exec_in_node "k3d-$clustername-server-0" "env" | grep "bar=baz,bob" || failed "E info "Ensuring that container labels have been set as stated in the config" docker_assert_container_label "k3d-$clustername-server-0" "foo=bar" || failed "Expected label 'foo=bar' not present on container/node k3d-$clustername-server-0" +## K3s Node Labels +info "Ensuring that k3s node labels have been set as stated in the config" +k3s_assert_node_label "k3d-$clustername-server-0" "foo=bar" || failed "Expected label 'foo=bar' not present on node k3d-$clustername-server-0" + ## Registry Node info "Ensuring, that we have a registry node present" $EXE node list "k3d-$clustername-registry" || failed "Expected k3d-$clustername-registry to be present" diff --git a/tests/test_config_with_overrides.sh b/tests/test_config_with_overrides.sh index 294491b1..6b705a06 100755 --- a/tests/test_config_with_overrides.sh +++ 
b/tests/test_config_with_overrides.sh @@ -44,6 +44,10 @@ exec_in_node "k3d-$clustername-agent-1" "env" | grep "x=y" || failed "Expected e info "Ensuring that container labels have been set as stated in the config" docker_assert_container_label "k3d-$clustername-server-0" "foo=bar" || failed "Expected label 'foo=bar' not present on container/node k3d-$clustername-server-0" +## K3s Node Labels +info "Ensuring that k3s node labels have been set as stated in the config" +k3s_assert_node_label "k3d-$clustername-server-0" "foo=bar" || failed "Expected label 'foo=bar' not present on node k3d-$clustername-server-0" + ## Registry Node info "Ensuring, that we DO NOT have a registry node present" $EXE node list "k3d-$clustername-registry" && failed "Expected k3d-$clustername-registry to NOT be present" From 897e49a8ee9c19096f6576c361b02e62a522c462 Mon Sep 17 00:00:00 2001 From: Thorsten Klein Date: Wed, 16 Jun 2021 15:59:43 +0200 Subject: [PATCH 04/46] [FEATURE] add ability to add ports to an existing loadbalancer (#615) --- cmd/node/node.go | 1 + cmd/node/nodeEdit.go | 113 ++++ go.mod | 1 + go.sum | 4 + pkg/client/node.go | 150 +++++ pkg/runtimes/docker/node.go | 18 + pkg/runtimes/docker/translate.go | 10 +- pkg/runtimes/runtime.go | 1 + pkg/util/ports.go | 19 + tests/test_node_edit.sh | 39 ++ .../mitchellh/copystructure/LICENSE | 21 + .../mitchellh/copystructure/README.md | 21 + .../mitchellh/copystructure/copier_time.go | 15 + .../mitchellh/copystructure/copystructure.go | 631 ++++++++++++++++++ .../github.com/mitchellh/copystructure/go.mod | 5 + .../github.com/mitchellh/copystructure/go.sum | 2 + .../mitchellh/reflectwalk/.travis.yml | 1 + .../github.com/mitchellh/reflectwalk/LICENSE | 21 + .../mitchellh/reflectwalk/README.md | 6 + .../github.com/mitchellh/reflectwalk/go.mod | 1 + .../mitchellh/reflectwalk/location.go | 19 + .../mitchellh/reflectwalk/location_string.go | 16 + .../mitchellh/reflectwalk/reflectwalk.go | 420 ++++++++++++ vendor/modules.txt | 5 + 24 files 
changed, 1531 insertions(+), 9 deletions(-) create mode 100644 cmd/node/nodeEdit.go create mode 100755 tests/test_node_edit.sh create mode 100644 vendor/github.com/mitchellh/copystructure/LICENSE create mode 100644 vendor/github.com/mitchellh/copystructure/README.md create mode 100644 vendor/github.com/mitchellh/copystructure/copier_time.go create mode 100644 vendor/github.com/mitchellh/copystructure/copystructure.go create mode 100644 vendor/github.com/mitchellh/copystructure/go.mod create mode 100644 vendor/github.com/mitchellh/copystructure/go.sum create mode 100644 vendor/github.com/mitchellh/reflectwalk/.travis.yml create mode 100644 vendor/github.com/mitchellh/reflectwalk/LICENSE create mode 100644 vendor/github.com/mitchellh/reflectwalk/README.md create mode 100644 vendor/github.com/mitchellh/reflectwalk/go.mod create mode 100644 vendor/github.com/mitchellh/reflectwalk/location.go create mode 100644 vendor/github.com/mitchellh/reflectwalk/location_string.go create mode 100644 vendor/github.com/mitchellh/reflectwalk/reflectwalk.go diff --git a/cmd/node/node.go b/cmd/node/node.go index 5266e150..5d132fbe 100644 --- a/cmd/node/node.go +++ b/cmd/node/node.go @@ -48,6 +48,7 @@ func NewCmdNode() *cobra.Command { cmd.AddCommand(NewCmdNodeStop()) cmd.AddCommand(NewCmdNodeDelete()) cmd.AddCommand(NewCmdNodeList()) + cmd.AddCommand(NewCmdNodeEdit()) // add flags diff --git a/cmd/node/nodeEdit.go b/cmd/node/nodeEdit.go new file mode 100644 index 00000000..e723f43e --- /dev/null +++ b/cmd/node/nodeEdit.go @@ -0,0 +1,113 @@ +/* +Copyright © 2020-2021 The k3d Author(s) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, 
subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +*/ +package node + +import ( + "github.com/docker/go-connections/nat" + "github.com/rancher/k3d/v4/cmd/util" + "github.com/rancher/k3d/v4/pkg/client" + "github.com/rancher/k3d/v4/pkg/runtimes" + k3d "github.com/rancher/k3d/v4/pkg/types" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" +) + +// NewCmdNodeEdit returns a new cobra command +func NewCmdNodeEdit() *cobra.Command { + + // create new cobra command + cmd := &cobra.Command{ + Use: "edit NAME", + Short: "[EXPERIMENTAL] Edit node(s).", + Long: `[EXPERIMENTAL] Edit node(s).`, + Args: cobra.ExactArgs(1), + Aliases: []string{"update"}, + ValidArgsFunction: util.ValidArgsAvailableNodes, + Run: func(cmd *cobra.Command, args []string) { + + existingNode, changeset := parseEditNodeCmd(cmd, args) + + log.Debugf("===== Current =====\n%+v\n===== Changeset =====\n%+v\n", existingNode, changeset) + + if err := client.NodeEdit(cmd.Context(), runtimes.SelectedRuntime, existingNode, changeset); err != nil { + log.Fatalln(err) + } + + log.Infof("Successfully updated %s", existingNode.Name) + + }, + } + + // add subcommands + + // add flags + cmd.Flags().StringArray("port-add", nil, "[EXPERIMENTAL] (serverlb only!) 
Map ports from the node container to the host (Format: `[HOST:][HOSTPORT:]CONTAINERPORT[/PROTOCOL][@NODEFILTER]`)\n - Example: `k3d node edit k3d-mycluster-serverlb --port-add 8080:80`")
+
+	// done
+	return cmd
+}
+
+// parseEditNodeCmd parses the command input into variables required to edit a node
+func parseEditNodeCmd(cmd *cobra.Command, args []string) (*k3d.Node, *k3d.Node) {
+
+	existingNode, err := client.NodeGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Node{Name: args[0]})
+	if err != nil {
+		log.Fatalln(err)
+	}
+
+	if existingNode == nil {
+		log.Infof("Node %s not found", args[0])
+		return nil, nil
+	}
+
+	if existingNode.Role != k3d.LoadBalancerRole {
+		log.Fatalln("Currently only the loadbalancer can be updated!")
+	}
+
+	changeset := &k3d.Node{}
+
+	/*
+	 * --port-add
+	 */
+	portFlags, err := cmd.Flags().GetStringArray("port-add")
+	if err != nil {
+		log.Errorln(err)
+		return nil, nil
+	}
+
+	// init portmap
+	changeset.Ports = nat.PortMap{}
+
+	for _, flag := range portFlags {
+
+		portmappings, err := nat.ParsePortSpec(flag)
+		if err != nil {
+			log.Fatalf("Failed to parse port spec '%s': %+v", flag, err)
+		}
+
+		for _, pm := range portmappings {
+			changeset.Ports[pm.Port] = append(changeset.Ports[pm.Port], pm.Binding)
+		}
+	}
+
+	return existingNode, changeset
+}
diff --git a/go.mod b/go.mod
index 74b0a384..6893fcd8 100644
--- a/go.mod
+++ b/go.mod
@@ -17,6 +17,7 @@ require (
 	github.com/heroku/docker-registry-client v0.0.0-20190909225348-afc9e1acc3d5
 	github.com/imdario/mergo v0.3.12
 	github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de
+	github.com/mitchellh/copystructure v1.2.0
 	github.com/mitchellh/go-homedir v1.1.0
 	github.com/moby/sys/mount v0.2.0 // indirect
 	github.com/moby/term v0.0.0-20201110203204-bea5bbe245bf // indirect
diff --git a/go.sum b/go.sum
index 47bcb66f..879f84b4 100644
--- a/go.sum
+++ b/go.sum
@@ -368,6 +368,8 @@ github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpe
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -380,6 +382,8 @@ github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238/go.mod h1:F github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/sys/mount v0.2.0 h1:WhCW5B355jtxndN5ovugJlMFJawbUODuW8fSnEH6SSM= github.com/moby/sys/mount v0.2.0/go.mod h1:aAivFE2LB3W4bACsUXChRHQ0qKWsetY4Y9V7sxOougM= diff --git a/pkg/client/node.go b/pkg/client/node.go index a00f4666..019a901b 100644 --- a/pkg/client/node.go +++ b/pkg/client/node.go @@ -33,6 +33,9 @@ import ( "strings" "time" + copystruct "github.com/mitchellh/copystructure" + + "github.com/docker/go-connections/nat" dockerunits "github.com/docker/go-units" "github.com/imdario/mergo" "github.com/rancher/k3d/v4/pkg/actions" @@ -639,3 +642,150 @@ nodeLoop: return 
resultList } + +// NodeEdit let's you update an existing node +func NodeEdit(ctx context.Context, runtime runtimes.Runtime, existingNode, changeset *k3d.Node) error { + + /* + * Make a deep copy of the existing node + */ + + result, err := CopyNode(ctx, existingNode, CopyNodeOpts{keepState: false}) + if err != nil { + return err + } + + /* + * Apply changes + */ + + // === Ports === + if result.Ports == nil { + result.Ports = nat.PortMap{} + } + for port, portbindings := range changeset.Ports { + loopChangesetPortbindings: + for _, portbinding := range portbindings { + + // loop over existing portbindings to avoid port collisions (docker doesn't check for it) + for _, existingPB := range result.Ports[port] { + if util.IsPortBindingEqual(portbinding, existingPB) { // also matches on "equal" HostIPs (127.0.0.1, "", 0.0.0.0) + log.Tracef("Skipping existing PortBinding: %+v", existingPB) + continue loopChangesetPortbindings + } + } + log.Tracef("Adding portbinding %+v for port %s", portbinding, port.Port()) + result.Ports[port] = append(result.Ports[port], portbinding) + } + } + + // --- Loadbalancer specifics --- + if result.Role == k3d.LoadBalancerRole { + nodeEditApplyLBSpecifics(ctx, result) + } + + // replace existing node + return NodeReplace(ctx, runtime, existingNode, result) + +} + +func nodeEditApplyLBSpecifics(ctx context.Context, lbNode *k3d.Node) { + tcp_ports := []string{} + udp_ports := []string{} + for index, env := range lbNode.Env { + if strings.HasPrefix(env, "PORTS=") || strings.HasPrefix(env, "UDP_PORTS=") { + // Remove matching environment variable from slice (does not preserve order) + lbNode.Env[index] = lbNode.Env[len(lbNode.Env)-1] // copy last element to index of matching env + lbNode.Env[len(lbNode.Env)-1] = "" // remove last element + lbNode.Env = lbNode.Env[:len(lbNode.Env)-1] // truncate + } + } + + for port := range lbNode.Ports { + switch port.Proto() { + case "tcp": + tcp_ports = append(tcp_ports, port.Port()) + break + case "udp": + 
udp_ports = append(udp_ports, port.Port()) + break + default: + log.Warnf("Unknown port protocol %s for port %s", port.Proto(), port.Port()) + } + } + lbNode.Env = append(lbNode.Env, fmt.Sprintf("PORTS=%s", strings.Join(tcp_ports, ","))) + lbNode.Env = append(lbNode.Env, fmt.Sprintf("UDP_PORTS=%s", strings.Join(udp_ports, ","))) +} + +func NodeReplace(ctx context.Context, runtime runtimes.Runtime, old, new *k3d.Node) error { + + // rename existing node + oldNameTemp := fmt.Sprintf("%s-%s", old.Name, util.GenerateRandomString(5)) + oldNameOriginal := old.Name + log.Infof("Renaming existing node %s to %s...", old.Name, oldNameTemp) + if err := runtime.RenameNode(ctx, old, oldNameTemp); err != nil { + return err + } + old.Name = oldNameTemp + + // create (not start) new node + log.Infof("Creating new node %s...", new.Name) + if err := NodeCreate(ctx, runtime, new, k3d.NodeCreateOpts{Wait: true}); err != nil { + if err := runtime.RenameNode(ctx, old, oldNameOriginal); err != nil { + return fmt.Errorf("Failed to create new node. Also failed to rename %s back to %s: %+v", old.Name, oldNameOriginal, err) + } + return fmt.Errorf("Failed to create new node. Brought back old node: %+v", err) + } + + // stop existing/old node + log.Infof("Stopping existing node %s...", old.Name) + if err := runtime.StopNode(ctx, old); err != nil { + return err + } + + // start new node + log.Infof("Starting new node %s...", new.Name) + if err := NodeStart(ctx, runtime, new, k3d.NodeStartOpts{Wait: true}); err != nil { + if err := NodeDelete(ctx, runtime, new, k3d.NodeDeleteOpts{SkipLBUpdate: true}); err != nil { + return fmt.Errorf("Failed to start new node. Also failed to rollback: %+v", err) + } + if err := runtime.RenameNode(ctx, old, oldNameOriginal); err != nil { + return fmt.Errorf("Failed to start new node. 
Also failed to rename %s back to %s: %+v", old.Name, oldNameOriginal, err) + } + old.Name = oldNameOriginal + if err := NodeStart(ctx, runtime, old, k3d.NodeStartOpts{Wait: true}); err != nil { + return fmt.Errorf("Failed to start new node. Also failed to restart old node: %+v", err) + } + return fmt.Errorf("Failed to start new node. Rolled back: %+v", err) + } + + // cleanup: delete old node + log.Infof("Deleting old node %s...", old.Name) + if err := NodeDelete(ctx, runtime, old, k3d.NodeDeleteOpts{SkipLBUpdate: true}); err != nil { + return err + } + + // done + return nil +} + +type CopyNodeOpts struct { + keepState bool +} + +func CopyNode(ctx context.Context, src *k3d.Node, opts CopyNodeOpts) (*k3d.Node, error) { + + targetCopy, err := copystruct.Copy(src) + if err != nil { + return nil, err + } + + result := targetCopy.(*k3d.Node) + + if !opts.keepState { + // ensure that node state is empty + result.State = k3d.NodeState{} + } + + return result, err +} diff --git a/pkg/runtimes/docker/node.go b/pkg/runtimes/docker/node.go index c7c3b37d..e8e1232c 100644 --- a/pkg/runtimes/docker/node.go +++ b/pkg/runtimes/docker/node.go @@ -447,3 +447,21 @@ func (d Docker) GetNodesInNetwork(ctx context.Context, network string) ([]*k3d.N return connectedNodes, nil } + +func (d Docker) RenameNode(ctx context.Context, node *k3d.Node, newName string) error { + // get the container for the given node + container, err := getNodeContainer(ctx, node) + if err != nil { + return err + } + + // create docker client + docker, err := GetDockerClient() + if err != nil { + log.Errorln("Failed to create docker client") + return err + } + defer docker.Close() + + return docker.ContainerRename(ctx, container.ID, newName) +} diff --git a/pkg/runtimes/docker/translate.go b/pkg/runtimes/docker/translate.go index c8331c75..564b0f6f 100644 --- a/pkg/runtimes/docker/translate.go +++ b/pkg/runtimes/docker/translate.go @@ -243,14 +243,6 @@ func TranslateContainerDetailsToNode(containerDetails 
types.ContainerJSON) (*k3d } } - // env vars: only copy K3S_* and K3D_* // FIXME: should we really do this? Might be unexpected, if user has e.g. HTTP_PROXY vars - env := []string{} - for _, envVar := range containerDetails.Config.Env { - if strings.HasPrefix(envVar, "K3D_") || strings.HasPrefix(envVar, "K3S_") { - env = append(env, envVar) - } - } - // labels: only copy k3d.* labels labels := map[string]string{} for k, v := range containerDetails.Config.Labels { @@ -277,7 +269,7 @@ func TranslateContainerDetailsToNode(containerDetails types.ContainerJSON) (*k3d Role: k3d.NodeRoles[containerDetails.Config.Labels[k3d.LabelRole]], Image: containerDetails.Image, Volumes: containerDetails.HostConfig.Binds, - Env: env, + Env: containerDetails.Config.Env, Cmd: containerDetails.Config.Cmd, Args: []string{}, // empty, since Cmd already contains flags Ports: containerDetails.HostConfig.PortBindings, diff --git a/pkg/runtimes/runtime.go b/pkg/runtimes/runtime.go index b27d87b1..d79cab77 100644 --- a/pkg/runtimes/runtime.go +++ b/pkg/runtimes/runtime.go @@ -52,6 +52,7 @@ type Runtime interface { GetHost() string CreateNode(context.Context, *k3d.Node) error DeleteNode(context.Context, *k3d.Node) error + RenameNode(context.Context, *k3d.Node, string) error GetNodesByLabel(context.Context, map[string]string) ([]*k3d.Node, error) GetNode(context.Context, *k3d.Node) (*k3d.Node, error) GetNodeStatus(context.Context, *k3d.Node) (bool, string, error) diff --git a/pkg/util/ports.go b/pkg/util/ports.go index acdc7322..4906ff57 100644 --- a/pkg/util/ports.go +++ b/pkg/util/ports.go @@ -25,6 +25,7 @@ package util import ( "net" + "github.com/docker/go-connections/nat" log "github.com/sirupsen/logrus" ) @@ -45,3 +46,21 @@ func GetFreePort() (int, error) { return tcpListener.Addr().(*net.TCPAddr).Port, nil } + +var equalHostIPs = map[string]interface{}{ + "": nil, + "127.0.0.1": nil, + "0.0.0.0": nil, + "localhost": nil, +} + +func IsPortBindingEqual(a, b nat.PortBinding) bool { + if 
a.HostPort == b.HostPort { + if _, ok := equalHostIPs[a.HostIP]; ok { + if _, ok := equalHostIPs[b.HostIP]; ok { + return true + } + } + } + return false +} diff --git a/tests/test_node_edit.sh b/tests/test_node_edit.sh new file mode 100755 index 00000000..b9a5de04 --- /dev/null +++ b/tests/test_node_edit.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +CURR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +[ -d "$CURR_DIR" ] || { echo "FATAL: no current dir (maybe running in zsh?)"; exit 1; } + +# shellcheck source=./common.sh +source "$CURR_DIR/common.sh" + +export CURRENT_STAGE="Test | NodeEdit" + +highlight "[START] NodeEdit" + +clustername="test-node-edit" + +existingPortMappingHostPort="1111" +existingPortMappingContainerPort="2222" +newPortMappingHostPort="3333" +newPortMappingContainerPort="4444" + +info "Creating cluster $clustername..." +$EXE cluster create $clustername --port "$existingPortMappingHostPort:$existingPortMappingContainerPort@loadbalancer" || failed "could not create cluster $clustername" + +info "Checking cluster access..." +check_clusters "$clustername" || failed "error checking cluster access" + +info "Adding port-mapping to loadbalancer..." +$EXE node edit k3d-$clustername-serverlb --port-add $existingPortMappingHostPort:$existingPortMappingContainerPort --port-add $newPortMappingHostPort:$newPortMappingContainerPort || failed "failed to add port-mapping to serverlb in $clustername" + +info "Checking port-mappings..." +docker inspect k3d-$clustername-serverlb --format '{{ range $k, $v := .NetworkSettings.Ports }}{{ printf "%s->%s\n" $k $v }}{{ end }}' | grep -E "^$existingPortMappingContainerPort" || failed "failed to verify pre-existing port-mapping" +docker inspect k3d-$clustername-serverlb --format '{{ range $k, $v := .NetworkSettings.Ports }}{{ printf "%s->%s\n" $k $v }}{{ end }}' | grep -E "^$newPortMappingContainerPort" || failed "failed to verify pre-existing port-mapping" + +info "Checking cluster access..." 
+check_clusters "$clustername" || failed "error checking cluster access" + +info "Deleting cluster $clustername..." +$EXE cluster delete $clustername || failed "failed to delete the cluster $clustername" + +exit 0 diff --git a/vendor/github.com/mitchellh/copystructure/LICENSE b/vendor/github.com/mitchellh/copystructure/LICENSE new file mode 100644 index 00000000..22985159 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/copystructure/README.md b/vendor/github.com/mitchellh/copystructure/README.md new file mode 100644 index 00000000..f0fbd2e5 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/README.md @@ -0,0 +1,21 @@ +# copystructure + +copystructure is a Go library for deep copying values in Go. 
+ +This allows you to copy Go values that may contain reference values +such as maps, slices, or pointers, and copy their data as well instead +of just their references. + +## Installation + +Standard `go get`: + +``` +$ go get github.com/mitchellh/copystructure +``` + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/copystructure). + +The `Copy` function has examples associated with it there. diff --git a/vendor/github.com/mitchellh/copystructure/copier_time.go b/vendor/github.com/mitchellh/copystructure/copier_time.go new file mode 100644 index 00000000..db6a6aa1 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/copier_time.go @@ -0,0 +1,15 @@ +package copystructure + +import ( + "reflect" + "time" +) + +func init() { + Copiers[reflect.TypeOf(time.Time{})] = timeCopier +} + +func timeCopier(v interface{}) (interface{}, error) { + // Just... copy it. + return v.(time.Time), nil +} diff --git a/vendor/github.com/mitchellh/copystructure/copystructure.go b/vendor/github.com/mitchellh/copystructure/copystructure.go new file mode 100644 index 00000000..8089e667 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/copystructure.go @@ -0,0 +1,631 @@ +package copystructure + +import ( + "errors" + "reflect" + "sync" + + "github.com/mitchellh/reflectwalk" +) + +const tagKey = "copy" + +// Copy returns a deep copy of v. +// +// Copy is unable to copy unexported fields in a struct (lowercase field names). +// Unexported fields can't be reflected by the Go runtime and therefore +// copystructure can't perform any data copies. +// +// For structs, copy behavior can be controlled with struct tags. For example: +// +// struct { +// Name string +// Data *bytes.Buffer `copy:"shallow"` +// } +// +// The available tag values are: +// +// * "ignore" - The field will be ignored, effectively resulting in it being +// assigned the zero value in the copy. +// +// * "shallow" - The field will be be shallow copied. 
This means that references +// values such as pointers, maps, slices, etc. will be directly assigned +// versus deep copied. +// +func Copy(v interface{}) (interface{}, error) { + return Config{}.Copy(v) +} + +// CopierFunc is a function that knows how to deep copy a specific type. +// Register these globally with the Copiers variable. +type CopierFunc func(interface{}) (interface{}, error) + +// Copiers is a map of types that behave specially when they are copied. +// If a type is found in this map while deep copying, this function +// will be called to copy it instead of attempting to copy all fields. +// +// The key should be the type, obtained using: reflect.TypeOf(value with type). +// +// It is unsafe to write to this map after Copies have started. If you +// are writing to this map while also copying, wrap all modifications to +// this map as well as to Copy in a mutex. +var Copiers map[reflect.Type]CopierFunc = make(map[reflect.Type]CopierFunc) + +// ShallowCopiers is a map of pointer types that behave specially +// when they are copied. If a type is found in this map while deep +// copying, the pointer value will be shallow copied and not walked +// into. +// +// The key should be the type, obtained using: reflect.TypeOf(value +// with type). +// +// It is unsafe to write to this map after Copies have started. If you +// are writing to this map while also copying, wrap all modifications to +// this map as well as to Copy in a mutex. +var ShallowCopiers map[reflect.Type]struct{} = make(map[reflect.Type]struct{}) + +// Must is a helper that wraps a call to a function returning +// (interface{}, error) and panics if the error is non-nil. It is intended +// for use in variable initializations and should only be used when a copy +// error should be a crashing case. 
+func Must(v interface{}, err error) interface{} { + if err != nil { + panic("copy error: " + err.Error()) + } + + return v +} + +var errPointerRequired = errors.New("Copy argument must be a pointer when Lock is true") + +type Config struct { + // Lock any types that are a sync.Locker and are not a mutex while copying. + // If there is an RLocker method, use that to get the sync.Locker. + Lock bool + + // Copiers is a map of types associated with a CopierFunc. Use the global + // Copiers map if this is nil. + Copiers map[reflect.Type]CopierFunc + + // ShallowCopiers is a map of pointer types that when they are + // shallow copied no matter where they are encountered. Use the + // global ShallowCopiers if this is nil. + ShallowCopiers map[reflect.Type]struct{} +} + +func (c Config) Copy(v interface{}) (interface{}, error) { + if c.Lock && reflect.ValueOf(v).Kind() != reflect.Ptr { + return nil, errPointerRequired + } + + w := new(walker) + if c.Lock { + w.useLocks = true + } + + if c.Copiers == nil { + c.Copiers = Copiers + } + w.copiers = c.Copiers + + if c.ShallowCopiers == nil { + c.ShallowCopiers = ShallowCopiers + } + w.shallowCopiers = c.ShallowCopiers + + err := reflectwalk.Walk(v, w) + if err != nil { + return nil, err + } + + // Get the result. If the result is nil, then we want to turn it + // into a typed nil if we can. + result := w.Result + if result == nil { + val := reflect.ValueOf(v) + result = reflect.Indirect(reflect.New(val.Type())).Interface() + } + + return result, nil +} + +// Return the key used to index interfaces types we've seen. Store the number +// of pointers in the upper 32bits, and the depth in the lower 32bits. This is +// easy to calculate, easy to match a key with our current depth, and we don't +// need to deal with initializing and cleaning up nested maps or slices. 
+func ifaceKey(pointers, depth int) uint64 { + return uint64(pointers)<<32 | uint64(depth) +} + +type walker struct { + Result interface{} + + copiers map[reflect.Type]CopierFunc + shallowCopiers map[reflect.Type]struct{} + depth int + ignoreDepth int + vals []reflect.Value + cs []reflect.Value + + // This stores the number of pointers we've walked over, indexed by depth. + ps []int + + // If an interface is indirected by a pointer, we need to know the type of + // interface to create when creating the new value. Store the interface + // types here, indexed by both the walk depth and the number of pointers + // already seen at that depth. Use ifaceKey to calculate the proper uint64 + // value. + ifaceTypes map[uint64]reflect.Type + + // any locks we've taken, indexed by depth + locks []sync.Locker + // take locks while walking the structure + useLocks bool +} + +func (w *walker) Enter(l reflectwalk.Location) error { + w.depth++ + + // ensure we have enough elements to index via w.depth + for w.depth >= len(w.locks) { + w.locks = append(w.locks, nil) + } + + for len(w.ps) < w.depth+1 { + w.ps = append(w.ps, 0) + } + + return nil +} + +func (w *walker) Exit(l reflectwalk.Location) error { + locker := w.locks[w.depth] + w.locks[w.depth] = nil + if locker != nil { + defer locker.Unlock() + } + + // clear out pointers and interfaces as we exit the stack + w.ps[w.depth] = 0 + + for k := range w.ifaceTypes { + mask := uint64(^uint32(0)) + if k&mask == uint64(w.depth) { + delete(w.ifaceTypes, k) + } + } + + w.depth-- + if w.ignoreDepth > w.depth { + w.ignoreDepth = 0 + } + + if w.ignoring() { + return nil + } + + switch l { + case reflectwalk.Array: + fallthrough + case reflectwalk.Map: + fallthrough + case reflectwalk.Slice: + w.replacePointerMaybe() + + // Pop map off our container + w.cs = w.cs[:len(w.cs)-1] + case reflectwalk.MapValue: + // Pop off the key and value + mv := w.valPop() + mk := w.valPop() + m := w.cs[len(w.cs)-1] + + // If mv is the zero value, 
SetMapIndex deletes the key form the map, + // or in this case never adds it. We need to create a properly typed + // zero value so that this key can be set. + if !mv.IsValid() { + mv = reflect.Zero(m.Elem().Type().Elem()) + } + m.Elem().SetMapIndex(mk, mv) + case reflectwalk.ArrayElem: + // Pop off the value and the index and set it on the array + v := w.valPop() + i := w.valPop().Interface().(int) + if v.IsValid() { + a := w.cs[len(w.cs)-1] + ae := a.Elem().Index(i) // storing array as pointer on stack - so need Elem() call + if ae.CanSet() { + ae.Set(v) + } + } + case reflectwalk.SliceElem: + // Pop off the value and the index and set it on the slice + v := w.valPop() + i := w.valPop().Interface().(int) + if v.IsValid() { + s := w.cs[len(w.cs)-1] + se := s.Elem().Index(i) + if se.CanSet() { + se.Set(v) + } + } + case reflectwalk.Struct: + w.replacePointerMaybe() + + // Remove the struct from the container stack + w.cs = w.cs[:len(w.cs)-1] + case reflectwalk.StructField: + // Pop off the value and the field + v := w.valPop() + f := w.valPop().Interface().(reflect.StructField) + if v.IsValid() { + s := w.cs[len(w.cs)-1] + sf := reflect.Indirect(s).FieldByName(f.Name) + + if sf.CanSet() { + sf.Set(v) + } + } + case reflectwalk.WalkLoc: + // Clear out the slices for GC + w.cs = nil + w.vals = nil + } + + return nil +} + +func (w *walker) Map(m reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(m) + + // Create the map. 
If the map itself is nil, then just make a nil map + var newMap reflect.Value + if m.IsNil() { + newMap = reflect.New(m.Type()) + } else { + newMap = wrapPtr(reflect.MakeMap(m.Type())) + } + + w.cs = append(w.cs, newMap) + w.valPush(newMap) + return nil +} + +func (w *walker) MapElem(m, k, v reflect.Value) error { + return nil +} + +func (w *walker) PointerEnter(v bool) error { + if v { + w.ps[w.depth]++ + } + return nil +} + +func (w *walker) PointerExit(v bool) error { + if v { + w.ps[w.depth]-- + } + return nil +} + +func (w *walker) Pointer(v reflect.Value) error { + if _, ok := w.shallowCopiers[v.Type()]; ok { + // Shallow copy this value. Use the same logic as primitive, then + // return skip. + if err := w.Primitive(v); err != nil { + return err + } + + return reflectwalk.SkipEntry + } + + return nil +} + +func (w *walker) Interface(v reflect.Value) error { + if !v.IsValid() { + return nil + } + if w.ifaceTypes == nil { + w.ifaceTypes = make(map[uint64]reflect.Type) + } + + w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)] = v.Type() + return nil +} + +func (w *walker) Primitive(v reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(v) + + // IsValid verifies the v is non-zero and CanInterface verifies + // that we're allowed to read this value (unexported fields). + var newV reflect.Value + if v.IsValid() && v.CanInterface() { + newV = reflect.New(v.Type()) + newV.Elem().Set(v) + } + + w.valPush(newV) + w.replacePointerMaybe() + return nil +} + +func (w *walker) Slice(s reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(s) + + var newS reflect.Value + if s.IsNil() { + newS = reflect.New(s.Type()) + } else { + newS = wrapPtr(reflect.MakeSlice(s.Type(), s.Len(), s.Cap())) + } + + w.cs = append(w.cs, newS) + w.valPush(newS) + return nil +} + +func (w *walker) SliceElem(i int, elem reflect.Value) error { + if w.ignoring() { + return nil + } + + // We don't write the slice here because elem might still be + // arbitrarily complex. 
Just record the index and continue on. + w.valPush(reflect.ValueOf(i)) + + return nil +} + +func (w *walker) Array(a reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(a) + + newA := reflect.New(a.Type()) + + w.cs = append(w.cs, newA) + w.valPush(newA) + return nil +} + +func (w *walker) ArrayElem(i int, elem reflect.Value) error { + if w.ignoring() { + return nil + } + + // We don't write the array here because elem might still be + // arbitrarily complex. Just record the index and continue on. + w.valPush(reflect.ValueOf(i)) + + return nil +} + +func (w *walker) Struct(s reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(s) + + var v reflect.Value + if c, ok := w.copiers[s.Type()]; ok { + // We have a Copier for this struct, so we use that copier to + // get the copy, and we ignore anything deeper than this. + w.ignoreDepth = w.depth + + dup, err := c(s.Interface()) + if err != nil { + return err + } + + // We need to put a pointer to the value on the value stack, + // so allocate a new pointer and set it. + v = reflect.New(s.Type()) + reflect.Indirect(v).Set(reflect.ValueOf(dup)) + } else { + // No copier, we copy ourselves and allow reflectwalk to guide + // us deeper into the structure for copying. + v = reflect.New(s.Type()) + } + + // Push the value onto the value stack for setting the struct field, + // and add the struct itself to the containers stack in case we walk + // deeper so that its own fields can be modified. + w.valPush(v) + w.cs = append(w.cs, v) + + return nil +} + +func (w *walker) StructField(f reflect.StructField, v reflect.Value) error { + if w.ignoring() { + return nil + } + + // If PkgPath is non-empty, this is a private (unexported) field. + // We do not set this unexported since the Go runtime doesn't allow us. 
+ if f.PkgPath != "" { + return reflectwalk.SkipEntry + } + + switch f.Tag.Get(tagKey) { + case "shallow": + // If we're shallow copying then assign the value directly to the + // struct and skip the entry. + if v.IsValid() { + s := w.cs[len(w.cs)-1] + sf := reflect.Indirect(s).FieldByName(f.Name) + if sf.CanSet() { + sf.Set(v) + } + } + + return reflectwalk.SkipEntry + + case "ignore": + // Do nothing + return reflectwalk.SkipEntry + } + + // Push the field onto the stack, we'll handle it when we exit + // the struct field in Exit... + w.valPush(reflect.ValueOf(f)) + + return nil +} + +// ignore causes the walker to ignore any more values until we exit this on +func (w *walker) ignore() { + w.ignoreDepth = w.depth +} + +func (w *walker) ignoring() bool { + return w.ignoreDepth > 0 && w.depth >= w.ignoreDepth +} + +func (w *walker) pointerPeek() bool { + return w.ps[w.depth] > 0 +} + +func (w *walker) valPop() reflect.Value { + result := w.vals[len(w.vals)-1] + w.vals = w.vals[:len(w.vals)-1] + + // If we're out of values, that means we popped everything off. In + // this case, we reset the result so the next pushed value becomes + // the result. + if len(w.vals) == 0 { + w.Result = nil + } + + return result +} + +func (w *walker) valPush(v reflect.Value) { + w.vals = append(w.vals, v) + + // If we haven't set the result yet, then this is the result since + // it is the first (outermost) value we're seeing. + if w.Result == nil && v.IsValid() { + w.Result = v.Interface() + } +} + +func (w *walker) replacePointerMaybe() { + // Determine the last pointer value. If it is NOT a pointer, then + // we need to push that onto the stack. + if !w.pointerPeek() { + w.valPush(reflect.Indirect(w.valPop())) + return + } + + v := w.valPop() + + // If the expected type is a pointer to an interface of any depth, + // such as *interface{}, **interface{}, etc., then we need to convert + // the value "v" from *CONCRETE to *interface{} so types match for + // Set. 
+ // + // Example if v is type *Foo where Foo is a struct, v would become + // *interface{} instead. This only happens if we have an interface expectation + // at this depth. + // + // For more info, see GH-16 + if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)]; ok && iType.Kind() == reflect.Interface { + y := reflect.New(iType) // Create *interface{} + y.Elem().Set(reflect.Indirect(v)) // Assign "Foo" to interface{} (dereferenced) + v = y // v is now typed *interface{} (where *v = Foo) + } + + for i := 1; i < w.ps[w.depth]; i++ { + if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth]-i, w.depth)]; ok { + iface := reflect.New(iType).Elem() + iface.Set(v) + v = iface + } + + p := reflect.New(v.Type()) + p.Elem().Set(v) + v = p + } + + w.valPush(v) +} + +// if this value is a Locker, lock it and add it to the locks slice +func (w *walker) lock(v reflect.Value) { + if !w.useLocks { + return + } + + if !v.IsValid() || !v.CanInterface() { + return + } + + type rlocker interface { + RLocker() sync.Locker + } + + var locker sync.Locker + + // We can't call Interface() on a value directly, since that requires + // a copy. This is OK, since the pointer to a value which is a sync.Locker + // is also a sync.Locker. + if v.Kind() == reflect.Ptr { + switch l := v.Interface().(type) { + case rlocker: + // don't lock a mutex directly + if _, ok := l.(*sync.RWMutex); !ok { + locker = l.RLocker() + } + case sync.Locker: + locker = l + } + } else if v.CanAddr() { + switch l := v.Addr().Interface().(type) { + case rlocker: + // don't lock a mutex directly + if _, ok := l.(*sync.RWMutex); !ok { + locker = l.RLocker() + } + case sync.Locker: + locker = l + } + } + + // still no callable locker + if locker == nil { + return + } + + // don't lock a mutex directly + switch locker.(type) { + case *sync.Mutex, *sync.RWMutex: + return + } + + locker.Lock() + w.locks[w.depth] = locker +} + +// wrapPtr is a helper that takes v and always make it *v. 
copystructure +// stores things internally as pointers until the last moment before unwrapping +func wrapPtr(v reflect.Value) reflect.Value { + if !v.IsValid() { + return v + } + vPtr := reflect.New(v.Type()) + vPtr.Elem().Set(v) + return vPtr +} diff --git a/vendor/github.com/mitchellh/copystructure/go.mod b/vendor/github.com/mitchellh/copystructure/go.mod new file mode 100644 index 00000000..cd9c050c --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/go.mod @@ -0,0 +1,5 @@ +module github.com/mitchellh/copystructure + +go 1.15 + +require github.com/mitchellh/reflectwalk v1.0.2 diff --git a/vendor/github.com/mitchellh/copystructure/go.sum b/vendor/github.com/mitchellh/copystructure/go.sum new file mode 100644 index 00000000..3e38da1e --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/go.sum @@ -0,0 +1,2 @@ +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= diff --git a/vendor/github.com/mitchellh/reflectwalk/.travis.yml b/vendor/github.com/mitchellh/reflectwalk/.travis.yml new file mode 100644 index 00000000..4f2ee4d9 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/.travis.yml @@ -0,0 +1 @@ +language: go diff --git a/vendor/github.com/mitchellh/reflectwalk/LICENSE b/vendor/github.com/mitchellh/reflectwalk/LICENSE new file mode 100644 index 00000000..f9c841a5 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, 
subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/reflectwalk/README.md b/vendor/github.com/mitchellh/reflectwalk/README.md new file mode 100644 index 00000000..ac82cd2e --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/README.md @@ -0,0 +1,6 @@ +# reflectwalk + +reflectwalk is a Go library for "walking" a value in Go using reflection, +in the same way a directory tree can be "walked" on the filesystem. Walking +a complex structure can allow you to do manipulations on unknown structures +such as those decoded from JSON. 
diff --git a/vendor/github.com/mitchellh/reflectwalk/go.mod b/vendor/github.com/mitchellh/reflectwalk/go.mod new file mode 100644 index 00000000..52bb7c46 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/go.mod @@ -0,0 +1 @@ +module github.com/mitchellh/reflectwalk diff --git a/vendor/github.com/mitchellh/reflectwalk/location.go b/vendor/github.com/mitchellh/reflectwalk/location.go new file mode 100644 index 00000000..6a7f1761 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/location.go @@ -0,0 +1,19 @@ +package reflectwalk + +//go:generate stringer -type=Location location.go + +type Location uint + +const ( + None Location = iota + Map + MapKey + MapValue + Slice + SliceElem + Array + ArrayElem + Struct + StructField + WalkLoc +) diff --git a/vendor/github.com/mitchellh/reflectwalk/location_string.go b/vendor/github.com/mitchellh/reflectwalk/location_string.go new file mode 100644 index 00000000..70760cf4 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/location_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type=Location location.go"; DO NOT EDIT. 
+ +package reflectwalk + +import "fmt" + +const _Location_name = "NoneMapMapKeyMapValueSliceSliceElemArrayArrayElemStructStructFieldWalkLoc" + +var _Location_index = [...]uint8{0, 4, 7, 13, 21, 26, 35, 40, 49, 55, 66, 73} + +func (i Location) String() string { + if i >= Location(len(_Location_index)-1) { + return fmt.Sprintf("Location(%d)", i) + } + return _Location_name[_Location_index[i]:_Location_index[i+1]] +} diff --git a/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go new file mode 100644 index 00000000..7fee7b05 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go @@ -0,0 +1,420 @@ +// reflectwalk is a package that allows you to "walk" complex structures +// similar to how you may "walk" a filesystem: visiting every element one +// by one and calling callback functions allowing you to handle and manipulate +// those elements. +package reflectwalk + +import ( + "errors" + "reflect" +) + +// PrimitiveWalker implementations are able to handle primitive values +// within complex structures. Primitive values are numbers, strings, +// booleans, funcs, chans. +// +// These primitive values are often members of more complex +// structures (slices, maps, etc.) that are walkable by other interfaces. +type PrimitiveWalker interface { + Primitive(reflect.Value) error +} + +// InterfaceWalker implementations are able to handle interface values as they +// are encountered during the walk. +type InterfaceWalker interface { + Interface(reflect.Value) error +} + +// MapWalker implementations are able to handle individual elements +// found within a map structure. +type MapWalker interface { + Map(m reflect.Value) error + MapElem(m, k, v reflect.Value) error +} + +// SliceWalker implementations are able to handle slice elements found +// within complex structures. 
+type SliceWalker interface { + Slice(reflect.Value) error + SliceElem(int, reflect.Value) error +} + +// ArrayWalker implementations are able to handle array elements found +// within complex structures. +type ArrayWalker interface { + Array(reflect.Value) error + ArrayElem(int, reflect.Value) error +} + +// StructWalker is an interface that has methods that are called for +// structs when a Walk is done. +type StructWalker interface { + Struct(reflect.Value) error + StructField(reflect.StructField, reflect.Value) error +} + +// EnterExitWalker implementations are notified before and after +// they walk deeper into complex structures (into struct fields, +// into slice elements, etc.) +type EnterExitWalker interface { + Enter(Location) error + Exit(Location) error +} + +// PointerWalker implementations are notified when the value they're +// walking is a pointer or not. Pointer is called for _every_ value whether +// it is a pointer or not. +type PointerWalker interface { + PointerEnter(bool) error + PointerExit(bool) error +} + +// PointerValueWalker implementations are notified with the value of +// a particular pointer when a pointer is walked. Pointer is called +// right before PointerEnter. +type PointerValueWalker interface { + Pointer(reflect.Value) error +} + +// SkipEntry can be returned from walk functions to skip walking +// the value of this field. This is only valid in the following functions: +// +// - Struct: skips all fields from being walked +// - StructField: skips walking the struct value +// +var SkipEntry = errors.New("skip this entry") + +// Walk takes an arbitrary value and an interface and traverses the +// value, calling callbacks on the interface if they are supported. +// The interface should implement one or more of the walker interfaces +// in this package, such as PrimitiveWalker, StructWalker, etc. 
+func Walk(data, walker interface{}) (err error) { + v := reflect.ValueOf(data) + ew, ok := walker.(EnterExitWalker) + if ok { + err = ew.Enter(WalkLoc) + } + + if err == nil { + err = walk(v, walker) + } + + if ok && err == nil { + err = ew.Exit(WalkLoc) + } + + return +} + +func walk(v reflect.Value, w interface{}) (err error) { + // Determine if we're receiving a pointer and if so notify the walker. + // The logic here is convoluted but very important (tests will fail if + // almost any part is changed). I will try to explain here. + // + // First, we check if the value is an interface, if so, we really need + // to check the interface's VALUE to see whether it is a pointer. + // + // Check whether the value is then a pointer. If so, then set pointer + // to true to notify the user. + // + // If we still have a pointer or an interface after the indirections, then + // we unwrap another level + // + // At this time, we also set "v" to be the dereferenced value. This is + // because once we've unwrapped the pointer we want to use that value. + pointer := false + pointerV := v + + for { + if pointerV.Kind() == reflect.Interface { + if iw, ok := w.(InterfaceWalker); ok { + if err = iw.Interface(pointerV); err != nil { + return + } + } + + pointerV = pointerV.Elem() + } + + if pointerV.Kind() == reflect.Ptr { + if pw, ok := w.(PointerValueWalker); ok { + if err = pw.Pointer(pointerV); err != nil { + if err == SkipEntry { + // Skip the rest of this entry but clear the error + return nil + } + + return + } + } + + pointer = true + v = reflect.Indirect(pointerV) + } + if pw, ok := w.(PointerWalker); ok { + if err = pw.PointerEnter(pointer); err != nil { + return + } + + defer func(pointer bool) { + if err != nil { + return + } + + err = pw.PointerExit(pointer) + }(pointer) + } + + if pointer { + pointerV = v + } + pointer = false + + // If we still have a pointer or interface we have to indirect another level. 
+ switch pointerV.Kind() { + case reflect.Ptr, reflect.Interface: + continue + } + break + } + + // We preserve the original value here because if it is an interface + // type, we want to pass that directly into the walkPrimitive, so that + // we can set it. + originalV := v + if v.Kind() == reflect.Interface { + v = v.Elem() + } + + k := v.Kind() + if k >= reflect.Int && k <= reflect.Complex128 { + k = reflect.Int + } + + switch k { + // Primitives + case reflect.Bool, reflect.Chan, reflect.Func, reflect.Int, reflect.String, reflect.Invalid: + err = walkPrimitive(originalV, w) + return + case reflect.Map: + err = walkMap(v, w) + return + case reflect.Slice: + err = walkSlice(v, w) + return + case reflect.Struct: + err = walkStruct(v, w) + return + case reflect.Array: + err = walkArray(v, w) + return + default: + panic("unsupported type: " + k.String()) + } +} + +func walkMap(v reflect.Value, w interface{}) error { + ew, ewok := w.(EnterExitWalker) + if ewok { + ew.Enter(Map) + } + + if mw, ok := w.(MapWalker); ok { + if err := mw.Map(v); err != nil { + return err + } + } + + for _, k := range v.MapKeys() { + kv := v.MapIndex(k) + + if mw, ok := w.(MapWalker); ok { + if err := mw.MapElem(v, k, kv); err != nil { + return err + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(MapKey) + } + + if err := walk(k, w); err != nil { + return err + } + + if ok { + ew.Exit(MapKey) + ew.Enter(MapValue) + } + + // get the map value again as it may have changed in the MapElem call + if err := walk(v.MapIndex(k), w); err != nil { + return err + } + + if ok { + ew.Exit(MapValue) + } + } + + if ewok { + ew.Exit(Map) + } + + return nil +} + +func walkPrimitive(v reflect.Value, w interface{}) error { + if pw, ok := w.(PrimitiveWalker); ok { + return pw.Primitive(v) + } + + return nil +} + +func walkSlice(v reflect.Value, w interface{}) (err error) { + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(Slice) + } + + if sw, ok := w.(SliceWalker); ok { + if err := 
sw.Slice(v); err != nil { + return err + } + } + + for i := 0; i < v.Len(); i++ { + elem := v.Index(i) + + if sw, ok := w.(SliceWalker); ok { + if err := sw.SliceElem(i, elem); err != nil { + return err + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(SliceElem) + } + + if err := walk(elem, w); err != nil { + return err + } + + if ok { + ew.Exit(SliceElem) + } + } + + ew, ok = w.(EnterExitWalker) + if ok { + ew.Exit(Slice) + } + + return nil +} + +func walkArray(v reflect.Value, w interface{}) (err error) { + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(Array) + } + + if aw, ok := w.(ArrayWalker); ok { + if err := aw.Array(v); err != nil { + return err + } + } + + for i := 0; i < v.Len(); i++ { + elem := v.Index(i) + + if aw, ok := w.(ArrayWalker); ok { + if err := aw.ArrayElem(i, elem); err != nil { + return err + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(ArrayElem) + } + + if err := walk(elem, w); err != nil { + return err + } + + if ok { + ew.Exit(ArrayElem) + } + } + + ew, ok = w.(EnterExitWalker) + if ok { + ew.Exit(Array) + } + + return nil +} + +func walkStruct(v reflect.Value, w interface{}) (err error) { + ew, ewok := w.(EnterExitWalker) + if ewok { + ew.Enter(Struct) + } + + skip := false + if sw, ok := w.(StructWalker); ok { + err = sw.Struct(v) + if err == SkipEntry { + skip = true + err = nil + } + if err != nil { + return + } + } + + if !skip { + vt := v.Type() + for i := 0; i < vt.NumField(); i++ { + sf := vt.Field(i) + f := v.FieldByIndex([]int{i}) + + if sw, ok := w.(StructWalker); ok { + err = sw.StructField(sf, f) + + // SkipEntry just pretends this field doesn't even exist + if err == SkipEntry { + continue + } + + if err != nil { + return + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(StructField) + } + + err = walk(f, w) + if err != nil { + return + } + + if ok { + ew.Exit(StructField) + } + } + } + + if ewok { + ew.Exit(Struct) + } + + return nil +} diff --git a/vendor/modules.txt 
b/vendor/modules.txt index 2e8a869d..54f233ab 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -145,11 +145,16 @@ github.com/json-iterator/go github.com/liggitt/tabwriter # github.com/magiconair/properties v1.8.5 github.com/magiconair/properties +# github.com/mitchellh/copystructure v1.2.0 +## explicit +github.com/mitchellh/copystructure # github.com/mitchellh/go-homedir v1.1.0 ## explicit github.com/mitchellh/go-homedir # github.com/mitchellh/mapstructure v1.4.1 github.com/mitchellh/mapstructure +# github.com/mitchellh/reflectwalk v1.0.2 +github.com/mitchellh/reflectwalk # github.com/moby/sys/mount v0.2.0 ## explicit github.com/moby/sys/mount From cabfe98023d1bb7901c1d0a74bfb496ff869ba9f Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Thu, 17 Jun 2021 15:17:48 +0200 Subject: [PATCH 05/46] init enhanced templating for the nginx config --- proxy/Dockerfile | 1 + proxy/conf.d/nginx.toml | 6 ++--- proxy/nginx-proxy | 4 +++- proxy/templates/nginx.tmpl | 45 ++++++++++++++++---------------------- proxy/test/portmap.yaml | 7 ++++++ 5 files changed, 32 insertions(+), 31 deletions(-) create mode 100644 proxy/test/portmap.yaml diff --git a/proxy/Dockerfile b/proxy/Dockerfile index 105e89c4..300c37a1 100644 --- a/proxy/Dockerfile +++ b/proxy/Dockerfile @@ -11,5 +11,6 @@ RUN echo "Building for '${OS}/${ARCH}'..." 
\ COPY templates /etc/confd/templates/ COPY conf.d /etc/confd/conf.d/ COPY nginx-proxy /usr/bin/ +COPY test/portmap.yaml /etc/confd/values.yaml ENTRYPOINT nginx-proxy \ No newline at end of file diff --git a/proxy/conf.d/nginx.toml b/proxy/conf.d/nginx.toml index 6586eb9c..26decd79 100644 --- a/proxy/conf.d/nginx.toml +++ b/proxy/conf.d/nginx.toml @@ -2,7 +2,5 @@ src = "nginx.tmpl" dest = "/etc/nginx/nginx.conf" keys = [ - "SERVERS", - "PORTS", - "UDP_PORTS", -] + "ports" +] \ No newline at end of file diff --git a/proxy/nginx-proxy b/proxy/nginx-proxy index 39722944..1a1cd53b 100755 --- a/proxy/nginx-proxy +++ b/proxy/nginx-proxy @@ -1,7 +1,9 @@ #!/bin/sh # Run confd -confd -onetime -backend env +set -e +confd -onetime -backend file -file /etc/confd/values.yaml -log-level debug +set +e # Output Configuration echo "===== Initial nginx configuration =====" diff --git a/proxy/templates/nginx.tmpl b/proxy/templates/nginx.tmpl index 43e4a286..963b61dd 100644 --- a/proxy/templates/nginx.tmpl +++ b/proxy/templates/nginx.tmpl @@ -1,3 +1,10 @@ +################################### +# Generated by confd {{datetime}} # +# ####### # +# # k3d # # +# ####### # +################################### + {{- $servers := split (getenv "SERVERS") "," -}} {{- $ports := split (getenv "PORTS") "," -}} {{- $udp_ports := split (getenv "UDP_PORTS") "," -}} @@ -12,42 +19,28 @@ events { stream { - ####### - # TCP # - ####### - {{- range $port := $ports }} - upstream server_nodes_{{ $port }} { - {{- range $server := $servers }} - server {{ $server }}:{{ $port }} max_fails=1 fail_timeout=10s; - {{- end }} - } + {{- range $portstring := lsdir "/ports" }} - server { - listen {{ $port }}; - proxy_pass server_nodes_{{ $port }}; - proxy_timeout 600; - proxy_connect_timeout 2s; - } - {{- end }} - ####### - # UDP # - ####### + {{- $portdir := printf "/ports/%s/*" $portstring -}} + {{- $port := index (split $portstring ".") 0 -}} + {{- $protocol := index (split $portstring ".") 1 -}} + {{- $upstream := 
replace $portstring "." "_" -1 }} - {{- range $port := $udp_ports }} - {{- if $port }} - upstream server_nodes_udp_{{ $port }} { - {{- range $server := $servers }} + upstream {{ $upstream }} { + {{- range $server := getvs $portdir }} server {{ $server }}:{{ $port }} max_fails=1 fail_timeout=10s; {{- end }} } server { - listen {{ $port }} udp; - proxy_pass server_nodes_udp_{{ $port }}; + listen {{ $port }} {{- if (eq $protocol "udp") }} udp{{- end -}}; + proxy_pass {{ $upstream }}; proxy_timeout 600; proxy_connect_timeout 2s; } + + {{- end }} - {{- end }} + } diff --git a/proxy/test/portmap.yaml b/proxy/test/portmap.yaml new file mode 100644 index 00000000..02df30c3 --- /dev/null +++ b/proxy/test/portmap.yaml @@ -0,0 +1,7 @@ +ports: + 1234.tcp: + - server-0 + - server-1 + 4321.udp: + - agent-0 + - agent-1 \ No newline at end of file From a928e41a2a6b54a89d78a34cf7163fa5d0981c7e Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Thu, 17 Jun 2021 16:18:35 +0200 Subject: [PATCH 06/46] add debug functionality to get current loadbalancer config --- cmd/debug/debug.go | 91 ++++++++++++++++++++++++++++++++++++++ cmd/root.go | 2 + pkg/client/loadbalancer.go | 39 ++++++++++++++++ pkg/types/types.go | 20 +++++++++ 4 files changed, 152 insertions(+) create mode 100644 cmd/debug/debug.go diff --git a/cmd/debug/debug.go b/cmd/debug/debug.go new file mode 100644 index 00000000..c524790b --- /dev/null +++ b/cmd/debug/debug.go @@ -0,0 +1,91 @@ +/* +Copyright © 2020-2021 The k3d Author(s) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be 
included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +*/ +package debug + +import ( + "fmt" + + "github.com/rancher/k3d/v4/pkg/client" + "github.com/rancher/k3d/v4/pkg/runtimes" + "github.com/rancher/k3d/v4/pkg/types" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "gopkg.in/yaml.v2" +) + +// NewCmdDebug returns a new cobra command +func NewCmdDebug() *cobra.Command { + cmd := &cobra.Command{ + Use: "debug", + Hidden: true, + Short: "Debug k3d cluster(s)", + Long: `Debug k3d cluster(s)`, + Run: func(cmd *cobra.Command, args []string) { + if err := cmd.Help(); err != nil { + log.Errorln("Couldn't get help text") + log.Fatalln(err) + } + }, + } + + cmd.AddCommand(NewCmdDebugLoadbalancer()) + + return cmd +} + +func NewCmdDebugLoadbalancer() *cobra.Command { + cmd := &cobra.Command{ + Use: "loadbalancer", + Aliases: []string{"lb"}, + Short: "Debug the loadbalancer", + Long: `Debug the loadbalancer`, + Run: func(cmd *cobra.Command, args []string) { + if err := cmd.Help(); err != nil { + log.Errorln("Couldn't get help text") + log.Fatalln(err) + } + }, + } + + cmd.AddCommand(&cobra.Command{ + Use: "get-config", + Args: cobra.ExactArgs(1), // cluster name + Run: func(cmd *cobra.Command, args []string) { + c, err := client.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &types.Cluster{Name: args[0]}) + if err != nil { + log.Fatalln(err) + } + + lbconf, err := client.GetLoadbalancerConfig(cmd.Context(), runtimes.SelectedRuntime, c) + if err != nil { + log.Fatalln(err) + } + 
yamlized, err := yaml.Marshal(lbconf) + if err != nil { + log.Fatalln(err) + } + fmt.Println(string(yamlized)) + }, + }) + + return cmd +} diff --git a/cmd/root.go b/cmd/root.go index 418e4008..8eb9c9d5 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -34,6 +34,7 @@ import ( "github.com/rancher/k3d/v4/cmd/cluster" cfg "github.com/rancher/k3d/v4/cmd/config" + "github.com/rancher/k3d/v4/cmd/debug" "github.com/rancher/k3d/v4/cmd/image" "github.com/rancher/k3d/v4/cmd/kubeconfig" "github.com/rancher/k3d/v4/cmd/node" @@ -116,6 +117,7 @@ func init() { rootCmd.AddCommand(image.NewCmdImage()) rootCmd.AddCommand(cfg.NewCmdConfig()) rootCmd.AddCommand(registry.NewCmdRegistry()) + rootCmd.AddCommand(debug.NewCmdDebug()) rootCmd.AddCommand(&cobra.Command{ Use: "version", diff --git a/pkg/client/loadbalancer.go b/pkg/client/loadbalancer.go index f0fa74fc..489a4ef1 100644 --- a/pkg/client/loadbalancer.go +++ b/pkg/client/loadbalancer.go @@ -22,13 +22,17 @@ THE SOFTWARE. package client import ( + "bytes" "context" "fmt" + "io/ioutil" "strings" "github.com/rancher/k3d/v4/pkg/runtimes" + "github.com/rancher/k3d/v4/pkg/types" k3d "github.com/rancher/k3d/v4/pkg/types" log "github.com/sirupsen/logrus" + "sigs.k8s.io/yaml" ) // UpdateLoadbalancerConfig updates the loadbalancer config with an updated list of servers belonging to that cluster @@ -70,3 +74,38 @@ func UpdateLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, clu return nil } + +func GetLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.Cluster) (*types.LoadbalancerConfig, error) { + + if cluster.ServerLoadBalancer == nil { + for _, node := range cluster.Nodes { + if node.Role == types.LoadBalancerRole { + var err error + cluster.ServerLoadBalancer, err = NodeGet(ctx, runtime, node) + if err != nil { + return nil, err + } + } + } + } + + reader, err := runtime.ReadFromNode(ctx, types.DefaultLoadbalancerConfigPath, cluster.ServerLoadBalancer) + if err != nil { + return 
&k3d.LoadbalancerConfig{}, err + } + defer reader.Close() + + file, err := ioutil.ReadAll(reader) + if err != nil { + return nil, err + } + + file = bytes.Trim(file[512:], "\x00") // trim control characters, etc. + + currentConfig := &types.LoadbalancerConfig{} + if err := yaml.Unmarshal(file, currentConfig); err != nil { + return nil, err + } + + return currentConfig, nil +} diff --git a/pkg/types/types.go b/pkg/types/types.go index 3d5fa830..18bedf85 100644 --- a/pkg/types/types.go +++ b/pkg/types/types.go @@ -426,3 +426,23 @@ type RegistryExternal struct { Host string `yaml:"host" json:"host"` Port string `yaml:"port" json:"port"` } + +/* + * Loadbalancer + */ + +/* LoadbalancerConfig defines the coarse file structure to configure the k3d-proxy + * Example: + * ports: + * 1234.tcp: + * - k3d-k3s-default-server-0 + * - k3d-k3s-default-server-1 + * 4321.udp: + * - k3d-k3s-default-agent-0 + * - k3d-k3s-default-agent-1 + */ +type LoadbalancerConfig struct { + Ports map[string][]string `yaml:"ports"` +} + +const DefaultLoadbalancerConfigPath = "/etc/confd/portmap.yaml" From 12527f8a8de77e0cad84ffe7ae71c6cd6bb99e36 Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Fri, 18 Jun 2021 11:04:39 +0200 Subject: [PATCH 07/46] move loadbalancer creation to separate function --- pkg/client/cluster.go | 54 +--------------------------------- pkg/client/loadbalancer.go | 59 ++++++++++++++++++++++++++++++++++++++ pkg/types/types.go | 4 +++ 3 files changed, 64 insertions(+), 53 deletions(-) diff --git a/pkg/client/cluster.go b/pkg/client/cluster.go index e04484c1..6fdf2e7a 100644 --- a/pkg/client/cluster.go +++ b/pkg/client/cluster.go @@ -28,7 +28,6 @@ import ( "fmt" "sort" "strconv" - "strings" "time" gort "runtime" @@ -44,7 +43,6 @@ import ( k3d "github.com/rancher/k3d/v4/pkg/types" "github.com/rancher/k3d/v4/pkg/types/k3s" "github.com/rancher/k3d/v4/pkg/util" - "github.com/rancher/k3d/v4/version" log "github.com/sirupsen/logrus" "gopkg.in/yaml.v2" ) @@ -509,59 +507,9 @@ 
ClusterCreatOpts: */ // *** ServerLoadBalancer *** if !clusterCreateOpts.DisableLoadBalancer { - // Generate a comma-separated list of server/server names to pass to the LB container - servers := "" - for _, node := range cluster.Nodes { - if node.Role == k3d.ServerRole { - if servers == "" { - servers = node.Name - } else { - servers = fmt.Sprintf("%s,%s", servers, node.Name) - } - } - } - - // generate comma-separated list of extra ports to forward - ports := []string{k3d.DefaultAPIPort} - var udp_ports []string - for exposedPort := range cluster.ServerLoadBalancer.Ports { - if exposedPort.Proto() == "udp" { - udp_ports = append(udp_ports, exposedPort.Port()) - continue - } - ports = append(ports, exposedPort.Port()) - } - - if cluster.ServerLoadBalancer.Ports == nil { - cluster.ServerLoadBalancer.Ports = nat.PortMap{} - } - cluster.ServerLoadBalancer.Ports[k3d.DefaultAPIPort] = []nat.PortBinding{cluster.KubeAPI.Binding} - - // Create LB as a modified node with loadbalancerRole - lbNode := &k3d.Node{ - Name: fmt.Sprintf("%s-%s-serverlb", k3d.DefaultObjectNamePrefix, cluster.Name), - Image: fmt.Sprintf("%s:%s", k3d.DefaultLBImageRepo, version.GetHelperImageVersion()), - Ports: cluster.ServerLoadBalancer.Ports, - Env: []string{ - fmt.Sprintf("SERVERS=%s", servers), - fmt.Sprintf("PORTS=%s", strings.Join(ports, ",")), - fmt.Sprintf("WORKER_PROCESSES=%d", len(ports)), - }, - Role: k3d.LoadBalancerRole, - RuntimeLabels: clusterCreateOpts.GlobalLabels, // TODO: createLoadBalancer: add more expressive labels - Networks: []string{cluster.Network.Name}, - Restart: true, - } - if len(udp_ports) > 0 { - lbNode.Env = append(lbNode.Env, fmt.Sprintf("UDP_PORTS=%s", strings.Join(udp_ports, ","))) - } - cluster.Nodes = append(cluster.Nodes, lbNode) // append lbNode to list of cluster nodes, so it will be considered during rollback - log.Infof("Creating LoadBalancer '%s'", lbNode.Name) - if err := NodeCreate(clusterCreateCtx, runtime, lbNode, k3d.NodeCreateOpts{}); err != nil { - 
log.Errorln("Failed to create loadbalancer") + if err := LoadbalancerCreate(ctx, runtime, cluster, &k3d.LoadbalancerCreateOpts{Labels: clusterCreateOpts.GlobalLabels}); err != nil { return err } - log.Debugf("Created loadbalancer '%s'", lbNode.Name) } return nil diff --git a/pkg/client/loadbalancer.go b/pkg/client/loadbalancer.go index 489a4ef1..f76440f3 100644 --- a/pkg/client/loadbalancer.go +++ b/pkg/client/loadbalancer.go @@ -28,9 +28,11 @@ import ( "io/ioutil" "strings" + "github.com/docker/go-connections/nat" "github.com/rancher/k3d/v4/pkg/runtimes" "github.com/rancher/k3d/v4/pkg/types" k3d "github.com/rancher/k3d/v4/pkg/types" + "github.com/rancher/k3d/v4/version" log "github.com/sirupsen/logrus" "sigs.k8s.io/yaml" ) @@ -109,3 +111,60 @@ func GetLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, cluste return currentConfig, nil } + +func LoadbalancerCreate(ctx context.Context, runtime runtimes.Runtime, cluster *types.Cluster, opts *k3d.LoadbalancerCreateOpts) error { + // Generate a comma-separated list of server/server names to pass to the LB container + servers := "" + for _, node := range cluster.Nodes { + if node.Role == k3d.ServerRole { + if servers == "" { + servers = node.Name + } else { + servers = fmt.Sprintf("%s,%s", servers, node.Name) + } + } + } + + // generate comma-separated list of extra ports to forward + ports := []string{k3d.DefaultAPIPort} + var udp_ports []string + for exposedPort := range cluster.ServerLoadBalancer.Ports { + if exposedPort.Proto() == "udp" { + udp_ports = append(udp_ports, exposedPort.Port()) + continue + } + ports = append(ports, exposedPort.Port()) + } + + if cluster.ServerLoadBalancer.Ports == nil { + cluster.ServerLoadBalancer.Ports = nat.PortMap{} + } + cluster.ServerLoadBalancer.Ports[k3d.DefaultAPIPort] = []nat.PortBinding{cluster.KubeAPI.Binding} + + // Create LB as a modified node with loadbalancerRole + lbNode := &k3d.Node{ + Name: fmt.Sprintf("%s-%s-serverlb", k3d.DefaultObjectNamePrefix, 
cluster.Name), + Image: fmt.Sprintf("%s:%s", k3d.DefaultLBImageRepo, version.GetHelperImageVersion()), + Ports: cluster.ServerLoadBalancer.Ports, + Env: []string{ + fmt.Sprintf("SERVERS=%s", servers), + fmt.Sprintf("PORTS=%s", strings.Join(ports, ",")), + fmt.Sprintf("WORKER_PROCESSES=%d", len(ports)), + }, + Role: k3d.LoadBalancerRole, + RuntimeLabels: opts.Labels, // TODO: createLoadBalancer: add more expressive labels + Networks: []string{cluster.Network.Name}, + Restart: true, + } + if len(udp_ports) > 0 { + lbNode.Env = append(lbNode.Env, fmt.Sprintf("UDP_PORTS=%s", strings.Join(udp_ports, ","))) + } + cluster.Nodes = append(cluster.Nodes, lbNode) // append lbNode to list of cluster nodes, so it will be considered during rollback + log.Infof("Creating LoadBalancer '%s'", lbNode.Name) + if err := NodeCreate(ctx, runtime, lbNode, k3d.NodeCreateOpts{}); err != nil { + log.Errorln("Failed to create loadbalancer") + return err + } + log.Debugf("Created loadbalancer '%s'", lbNode.Name) + return nil +} diff --git a/pkg/types/types.go b/pkg/types/types.go index 18bedf85..24fc41f4 100644 --- a/pkg/types/types.go +++ b/pkg/types/types.go @@ -446,3 +446,7 @@ type LoadbalancerConfig struct { } const DefaultLoadbalancerConfigPath = "/etc/confd/portmap.yaml" + +type LoadbalancerCreateOpts struct { + Labels map[string]string +} From 220313524e28a942ad28932b9ed921638e9e66a1 Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Fri, 18 Jun 2021 12:57:27 +0200 Subject: [PATCH 08/46] generate new config file for lb and add helper functions to get images from env if needed --- pkg/client/cluster.go | 9 ++++- pkg/client/loadbalancer.go | 74 +++++++++++++++++++++----------------- pkg/types/images.go | 48 +++++++++++++++++++++++++ pkg/types/types.go | 12 +++++-- proxy/conf.d/nginx.toml | 3 +- proxy/templates/nginx.tmpl | 5 +-- proxy/test/portmap.yaml | 5 ++- 7 files changed, 114 insertions(+), 42 deletions(-) create mode 100644 pkg/types/images.go diff --git a/pkg/client/cluster.go 
b/pkg/client/cluster.go index 6fdf2e7a..4905f692 100644 --- a/pkg/client/cluster.go +++ b/pkg/client/cluster.go @@ -507,9 +507,16 @@ ClusterCreatOpts: */ // *** ServerLoadBalancer *** if !clusterCreateOpts.DisableLoadBalancer { - if err := LoadbalancerCreate(ctx, runtime, cluster, &k3d.LoadbalancerCreateOpts{Labels: clusterCreateOpts.GlobalLabels}); err != nil { + node, nodeCreateOpts, err := LoadbalancerPrepare(ctx, runtime, cluster, &k3d.LoadbalancerCreateOpts{Labels: clusterCreateOpts.GlobalLabels}) + if err != nil { + return err + } + if err := NodeCreate(ctx, runtime, node, *nodeCreateOpts); err != nil { + log.Errorln("Failed to create loadbalancer") return err } + log.Debugf("Created loadbalancer '%s'", node.Name) + return err } return nil diff --git a/pkg/client/loadbalancer.go b/pkg/client/loadbalancer.go index f76440f3..db47c494 100644 --- a/pkg/client/loadbalancer.go +++ b/pkg/client/loadbalancer.go @@ -29,10 +29,10 @@ import ( "strings" "github.com/docker/go-connections/nat" + "github.com/rancher/k3d/v4/pkg/actions" "github.com/rancher/k3d/v4/pkg/runtimes" "github.com/rancher/k3d/v4/pkg/types" k3d "github.com/rancher/k3d/v4/pkg/types" - "github.com/rancher/k3d/v4/version" log "github.com/sirupsen/logrus" "sigs.k8s.io/yaml" ) @@ -112,28 +112,28 @@ func GetLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, cluste return currentConfig, nil } -func LoadbalancerCreate(ctx context.Context, runtime runtimes.Runtime, cluster *types.Cluster, opts *k3d.LoadbalancerCreateOpts) error { - // Generate a comma-separated list of server/server names to pass to the LB container - servers := "" +func LoadbalancerPrepare(ctx context.Context, runtime runtimes.Runtime, cluster *types.Cluster, opts *k3d.LoadbalancerCreateOpts) (*k3d.Node, *k3d.NodeCreateOpts, error) { + + lbConfig := k3d.LoadbalancerConfig{ + Ports: map[string][]string{}, + Settings: k3d.LoadBalancerSettings{}, + } + + // get list of server nodes + servers := []string{} for _, node := range 
cluster.Nodes { if node.Role == k3d.ServerRole { - if servers == "" { - servers = node.Name - } else { - servers = fmt.Sprintf("%s,%s", servers, node.Name) - } + servers = append(servers, node.Name) } } - // generate comma-separated list of extra ports to forward - ports := []string{k3d.DefaultAPIPort} - var udp_ports []string + // Default API Port proxied to the server nodes + lbConfig.Ports[fmt.Sprintf("%s.tcp", k3d.DefaultAPIPort)] = servers + + // generate comma-separated list of extra ports to forward // TODO: no default targets? for exposedPort := range cluster.ServerLoadBalancer.Ports { - if exposedPort.Proto() == "udp" { - udp_ports = append(udp_ports, exposedPort.Port()) - continue - } - ports = append(ports, exposedPort.Port()) + // TODO: catch duplicates here? + lbConfig.Ports[fmt.Sprintf("%s.%s", exposedPort.Port(), exposedPort.Proto())] = servers } if cluster.ServerLoadBalancer.Ports == nil { @@ -143,28 +143,36 @@ func LoadbalancerCreate(ctx context.Context, runtime runtimes.Runtime, cluster * // Create LB as a modified node with loadbalancerRole lbNode := &k3d.Node{ - Name: fmt.Sprintf("%s-%s-serverlb", k3d.DefaultObjectNamePrefix, cluster.Name), - Image: fmt.Sprintf("%s:%s", k3d.DefaultLBImageRepo, version.GetHelperImageVersion()), - Ports: cluster.ServerLoadBalancer.Ports, - Env: []string{ - fmt.Sprintf("SERVERS=%s", servers), - fmt.Sprintf("PORTS=%s", strings.Join(ports, ",")), - fmt.Sprintf("WORKER_PROCESSES=%d", len(ports)), - }, + Name: fmt.Sprintf("%s-%s-serverlb", k3d.DefaultObjectNamePrefix, cluster.Name), + Image: k3d.GetLoadbalancerImage(), + Ports: cluster.ServerLoadBalancer.Ports, Role: k3d.LoadBalancerRole, RuntimeLabels: opts.Labels, // TODO: createLoadBalancer: add more expressive labels Networks: []string{cluster.Network.Name}, Restart: true, } - if len(udp_ports) > 0 { - lbNode.Env = append(lbNode.Env, fmt.Sprintf("UDP_PORTS=%s", strings.Join(udp_ports, ","))) - } cluster.Nodes = append(cluster.Nodes, lbNode) // append lbNode to list 
of cluster nodes, so it will be considered during rollback log.Infof("Creating LoadBalancer '%s'", lbNode.Name) - if err := NodeCreate(ctx, runtime, lbNode, k3d.NodeCreateOpts{}); err != nil { - log.Errorln("Failed to create loadbalancer") - return err + + // some additional nginx settings + lbConfig.Settings.WorkerProcesses = k3d.DefaultLoadbalancerWorkerProcesses + len(cluster.ServerLoadBalancer.Ports)*len(servers) + + // prepare to write config to lb container + configyaml, err := yaml.Marshal(lbConfig) + if err != nil { + return nil, nil, err } - log.Debugf("Created loadbalancer '%s'", lbNode.Name) - return nil + + writeLbConfigAction := k3d.NodeHook{ + Stage: k3d.LifecycleStagePreStart, + Action: actions.WriteFileAction{ + Runtime: runtime, + Dest: k3d.DefaultLoadbalancerConfigPath, + Mode: 0744, + Content: configyaml, + }, + } + + return lbNode, &k3d.NodeCreateOpts{NodeHooks: []k3d.NodeHook{writeLbConfigAction}}, nil + } diff --git a/pkg/types/images.go b/pkg/types/images.go new file mode 100644 index 00000000..713ab0a7 --- /dev/null +++ b/pkg/types/images.go @@ -0,0 +1,48 @@ +/* +Copyright © 2020-2021 The k3d Author(s) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +*/ +package types + +import ( + "fmt" + "os" + + "github.com/rancher/k3d/v4/version" + log "github.com/sirupsen/logrus" +) + +func GetLoadbalancerImage() string { + if img := os.Getenv("K3D_IMAGE_LOADBALANCER"); img != "" { + log.Infof("Loadbalancer image set from env var $K3D_IMAGE_LOADBALANCER: %s", img) + return img + } + + return fmt.Sprintf("%s:%s", DefaultLBImageRepo, version.GetHelperImageVersion()) +} + +func GetToolsImage() string { + if img := os.Getenv("K3D_IMAGE_TOOLS"); img != "" { + log.Infof("Tools image set from env var $K3D_IMAGE_TOOLS: %s", img) + return img + } + + return fmt.Sprintf("%s:%s", DefaultToolsImageRepo, version.GetHelperImageVersion()) +} diff --git a/pkg/types/types.go b/pkg/types/types.go index 24fc41f4..141fe2c6 100644 --- a/pkg/types/types.go +++ b/pkg/types/types.go @@ -442,10 +442,18 @@ type RegistryExternal struct { * - k3d-k3s-default-agent-1 */ type LoadbalancerConfig struct { - Ports map[string][]string `yaml:"ports"` + Ports map[string][]string `yaml:"ports"` + Settings LoadBalancerSettings `yaml:"settings"` } -const DefaultLoadbalancerConfigPath = "/etc/confd/portmap.yaml" +type LoadBalancerSettings struct { + WorkerProcesses int `yaml:"workerProcesses"` +} + +const ( + DefaultLoadbalancerConfigPath = "/etc/confd/portmap.yaml" + DefaultLoadbalancerWorkerProcesses = 1024 +) type LoadbalancerCreateOpts struct { Labels map[string]string diff --git a/proxy/conf.d/nginx.toml b/proxy/conf.d/nginx.toml index 26decd79..96805409 100644 --- a/proxy/conf.d/nginx.toml +++ b/proxy/conf.d/nginx.toml @@ -2,5 +2,6 @@ src = "nginx.tmpl" dest = "/etc/nginx/nginx.conf" keys = [ - "ports" + "ports", + "settings" ] \ No newline at end of file diff --git a/proxy/templates/nginx.tmpl 
b/proxy/templates/nginx.tmpl index 963b61dd..fa8c3087 100644 --- a/proxy/templates/nginx.tmpl +++ b/proxy/templates/nginx.tmpl @@ -5,16 +5,13 @@ # ####### # ################################### -{{- $servers := split (getenv "SERVERS") "," -}} -{{- $ports := split (getenv "PORTS") "," -}} -{{- $udp_ports := split (getenv "UDP_PORTS") "," -}} error_log stderr notice; worker_processes auto; events { multi_accept on; use epoll; - worker_connections {{ add 1024 (len $ports) }}; + worker_connections {{ getv "/settings/workerProcesses" }}; } stream { diff --git a/proxy/test/portmap.yaml b/proxy/test/portmap.yaml index 02df30c3..d102645b 100644 --- a/proxy/test/portmap.yaml +++ b/proxy/test/portmap.yaml @@ -4,4 +4,7 @@ ports: - server-1 4321.udp: - agent-0 - - agent-1 \ No newline at end of file + - agent-1 + +settings: + workerProcesses: 1030 \ No newline at end of file From a1977a2d3117db4575b7f7965572398353d6ef44 Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Fri, 18 Jun 2021 12:59:17 +0200 Subject: [PATCH 09/46] move defaults for images to new file --- pkg/types/images.go | 15 +++++++++++++++ pkg/types/types.go | 15 --------------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/pkg/types/images.go b/pkg/types/images.go index 713ab0a7..d6d6c7a9 100644 --- a/pkg/types/images.go +++ b/pkg/types/images.go @@ -29,6 +29,21 @@ import ( log "github.com/sirupsen/logrus" ) +// DefaultK3sImageRepo specifies the default image repository for the used k3s image +const DefaultK3sImageRepo = "docker.io/rancher/k3s" + +// DefaultLBImageRepo defines the default cluster load balancer image +const DefaultLBImageRepo = "docker.io/rancher/k3d-proxy" + +// DefaultToolsImageRepo defines the default image used for the tools container +const DefaultToolsImageRepo = "docker.io/rancher/k3d-tools" + +// DefaultRegistryImageRepo defines the default image used for the k3d-managed registry +const DefaultRegistryImageRepo = "docker.io/library/registry" + +// DefaultRegistryImageTag 
defines the default image tag used for the k3d-managed registry +const DefaultRegistryImageTag = "2" + func GetLoadbalancerImage() string { if img := os.Getenv("K3D_IMAGE_LOADBALANCER"); img != "" { log.Infof("Loadbalancer image set from env var $K3D_IMAGE_LOADBALANCER: %s", img) diff --git a/pkg/types/types.go b/pkg/types/types.go index 141fe2c6..4e4d06f1 100644 --- a/pkg/types/types.go +++ b/pkg/types/types.go @@ -41,21 +41,6 @@ const DefaultClusterName = "k3s-default" // ... and still stay within the 64 character limit (e.g. of docker) const DefaultClusterNameMaxLength = 32 -// DefaultK3sImageRepo specifies the default image repository for the used k3s image -const DefaultK3sImageRepo = "docker.io/rancher/k3s" - -// DefaultLBImageRepo defines the default cluster load balancer image -const DefaultLBImageRepo = "docker.io/rancher/k3d-proxy" - -// DefaultToolsImageRepo defines the default image used for the tools container -const DefaultToolsImageRepo = "docker.io/rancher/k3d-tools" - -// DefaultRegistryImageRepo defines the default image used for the k3d-managed registry -const DefaultRegistryImageRepo = "docker.io/library/registry" - -// DefaultRegistryImageTag defines the default image tag used for the k3d-managed registry -const DefaultRegistryImageTag = "2" - // DefaultObjectNamePrefix defines the name prefix for every object created by k3d const DefaultObjectNamePrefix = "k3d" From e78e2c127e0ea1a13ce6e7a7207673e4d0185ba9 Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Fri, 18 Jun 2021 13:20:07 +0200 Subject: [PATCH 10/46] proxy: add Makefile and remove test file from Dockerfile --- proxy/Dockerfile | 1 - proxy/Makefile | 5 +++++ 2 files changed, 5 insertions(+), 1 deletion(-) create mode 100644 proxy/Makefile diff --git a/proxy/Dockerfile b/proxy/Dockerfile index 300c37a1..105e89c4 100644 --- a/proxy/Dockerfile +++ b/proxy/Dockerfile @@ -11,6 +11,5 @@ RUN echo "Building for '${OS}/${ARCH}'..." 
\ COPY templates /etc/confd/templates/ COPY conf.d /etc/confd/conf.d/ COPY nginx-proxy /usr/bin/ -COPY test/portmap.yaml /etc/confd/values.yaml ENTRYPOINT nginx-proxy \ No newline at end of file diff --git a/proxy/Makefile b/proxy/Makefile new file mode 100644 index 00000000..6555f6ae --- /dev/null +++ b/proxy/Makefile @@ -0,0 +1,5 @@ +.PHONY: test + +test: + docker build . -t rancher/k3d-proxy:dev + docker run --rm -v $(shell pwd)/test/portmap.yaml:/etc/confd/values.yaml rancher/k3d-proxy:dev \ No newline at end of file From 364e96bf1fefcc76d46ed134e9889ded3ca9c2f2 Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Fri, 18 Jun 2021 13:20:58 +0200 Subject: [PATCH 11/46] types/node: add nodehooks to node struct to have them around at any stage (TO-DO: use this everywhere -> breaking change) --- pkg/client/cluster.go | 37 ++++++++++++++++++++++++++++++++----- pkg/client/loadbalancer.go | 35 ++++++++++------------------------- pkg/types/types.go | 3 ++- 3 files changed, 44 insertions(+), 31 deletions(-) diff --git a/pkg/client/cluster.go b/pkg/client/cluster.go index 4905f692..27225ed9 100644 --- a/pkg/client/cluster.go +++ b/pkg/client/cluster.go @@ -507,15 +507,40 @@ ClusterCreatOpts: */ // *** ServerLoadBalancer *** if !clusterCreateOpts.DisableLoadBalancer { - node, nodeCreateOpts, err := LoadbalancerPrepare(ctx, runtime, cluster, &k3d.LoadbalancerCreateOpts{Labels: clusterCreateOpts.GlobalLabels}) + lbNode, err := LoadbalancerPrepare(ctx, runtime, cluster, &k3d.LoadbalancerCreateOpts{Labels: clusterCreateOpts.GlobalLabels}) if err != nil { return err } - if err := NodeCreate(ctx, runtime, node, *nodeCreateOpts); err != nil { - log.Errorln("Failed to create loadbalancer") + cluster.Nodes = append(cluster.Nodes, lbNode) // append lbNode to list of cluster nodes, so it will be considered during rollback + + lbConfig, err := LoadbalancerGenerateConfig(cluster) + if err != nil { + return fmt.Errorf("error generating loadbalancer config: %v", err) + } + + // prepare to 
write config to lb container + configyaml, err := yaml.Marshal(lbConfig) + if err != nil { return err } - log.Debugf("Created loadbalancer '%s'", node.Name) + + writeLbConfigAction := k3d.NodeHook{ + Stage: k3d.LifecycleStagePreStart, + Action: actions.WriteFileAction{ + Runtime: runtime, + Dest: k3d.DefaultLoadbalancerConfigPath, + Mode: 0744, + Content: configyaml, + }, + } + + lbNode.HookActions = append(lbNode.HookActions, writeLbConfigAction) + + log.Infof("Creating LoadBalancer '%s'", lbNode.Name) + if err := NodeCreate(ctx, runtime, lbNode, k3d.NodeCreateOpts{}); err != nil { + return fmt.Errorf("error creating loadbalancer: %v", err) + } + log.Debugf("Created loadbalancer '%s'", lbNode.Name) return err } @@ -865,7 +890,9 @@ func ClusterStart(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clust log.Infoln("Starting helpers...") failedHelpers := 0 for _, helperNode := range aux { - nodeStartOpts := k3d.NodeStartOpts{} + nodeStartOpts := k3d.NodeStartOpts{ + NodeHooks: helperNode.HookActions, + } if helperNode.Role == k3d.LoadBalancerRole { nodeStartOpts.Wait = true } diff --git a/pkg/client/loadbalancer.go b/pkg/client/loadbalancer.go index db47c494..2a1debcc 100644 --- a/pkg/client/loadbalancer.go +++ b/pkg/client/loadbalancer.go @@ -29,7 +29,6 @@ import ( "strings" "github.com/docker/go-connections/nat" - "github.com/rancher/k3d/v4/pkg/actions" "github.com/rancher/k3d/v4/pkg/runtimes" "github.com/rancher/k3d/v4/pkg/types" k3d "github.com/rancher/k3d/v4/pkg/types" @@ -112,8 +111,7 @@ func GetLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, cluste return currentConfig, nil } -func LoadbalancerPrepare(ctx context.Context, runtime runtimes.Runtime, cluster *types.Cluster, opts *k3d.LoadbalancerCreateOpts) (*k3d.Node, *k3d.NodeCreateOpts, error) { - +func LoadbalancerGenerateConfig(cluster *k3d.Cluster) (k3d.LoadbalancerConfig, error) { lbConfig := k3d.LoadbalancerConfig{ Ports: map[string][]string{}, Settings: 
k3d.LoadBalancerSettings{}, @@ -136,6 +134,14 @@ func LoadbalancerPrepare(ctx context.Context, runtime runtimes.Runtime, cluster lbConfig.Ports[fmt.Sprintf("%s.%s", exposedPort.Port(), exposedPort.Proto())] = servers } + // some additional nginx settings + lbConfig.Settings.WorkerProcesses = k3d.DefaultLoadbalancerWorkerProcesses + len(cluster.ServerLoadBalancer.Ports)*len(servers) + + return lbConfig, nil +} + +func LoadbalancerPrepare(ctx context.Context, runtime runtimes.Runtime, cluster *types.Cluster, opts *k3d.LoadbalancerCreateOpts) (*k3d.Node, error) { + if cluster.ServerLoadBalancer.Ports == nil { cluster.ServerLoadBalancer.Ports = nat.PortMap{} } @@ -151,28 +157,7 @@ func LoadbalancerPrepare(ctx context.Context, runtime runtimes.Runtime, cluster Networks: []string{cluster.Network.Name}, Restart: true, } - cluster.Nodes = append(cluster.Nodes, lbNode) // append lbNode to list of cluster nodes, so it will be considered during rollback - log.Infof("Creating LoadBalancer '%s'", lbNode.Name) - - // some additional nginx settings - lbConfig.Settings.WorkerProcesses = k3d.DefaultLoadbalancerWorkerProcesses + len(cluster.ServerLoadBalancer.Ports)*len(servers) - - // prepare to write config to lb container - configyaml, err := yaml.Marshal(lbConfig) - if err != nil { - return nil, nil, err - } - - writeLbConfigAction := k3d.NodeHook{ - Stage: k3d.LifecycleStagePreStart, - Action: actions.WriteFileAction{ - Runtime: runtime, - Dest: k3d.DefaultLoadbalancerConfigPath, - Mode: 0744, - Content: configyaml, - }, - } - return lbNode, &k3d.NodeCreateOpts{NodeHooks: []k3d.NodeHook{writeLbConfigAction}}, nil + return lbNode, nil } diff --git a/pkg/types/types.go b/pkg/types/types.go index 4e4d06f1..f96377cf 100644 --- a/pkg/types/types.go +++ b/pkg/types/types.go @@ -333,6 +333,7 @@ type Node struct { Memory string // filled automatically State NodeState // filled automatically IP NodeIP // filled automatically + HookActions []NodeHook `yaml:"hooks" json:"hooks,omitempty"` 
} // ServerOpts describes some additional server role specific opts @@ -436,7 +437,7 @@ type LoadBalancerSettings struct { } const ( - DefaultLoadbalancerConfigPath = "/etc/confd/portmap.yaml" + DefaultLoadbalancerConfigPath = "/etc/confd/values.yaml" DefaultLoadbalancerWorkerProcesses = 1024 ) From 0fbf03a080b12370afb2a558ada042a005f3fdae Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Fri, 18 Jun 2021 13:44:46 +0200 Subject: [PATCH 12/46] tests/e2e: ensure that we use a cluster create timeout everywhere --- tests/test_ipam.sh | 2 +- tests/test_memory_limits.sh | 2 +- tests/test_node_edit.sh | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/test_ipam.sh b/tests/test_ipam.sh index 1d5e56d2..194c7822 100755 --- a/tests/test_ipam.sh +++ b/tests/test_ipam.sh @@ -18,7 +18,7 @@ expectedIPServer0="$expectedIPLabelServer0/16" # k3d excludes the subnet_start ( expectedIPServerLB="172.45.0.3/16" info "Creating cluster $clustername..." -$EXE cluster create $clustername --subnet $subnet || failed "could not create cluster $clustername" +$EXE cluster create $clustername --timeout 360s --subnet $subnet || failed "could not create cluster $clustername" info "Checking we have access to the cluster..." check_clusters "$clustername" || failed "error checking cluster" diff --git a/tests/test_memory_limits.sh b/tests/test_memory_limits.sh index a5b81501..934a0f38 100755 --- a/tests/test_memory_limits.sh +++ b/tests/test_memory_limits.sh @@ -13,7 +13,7 @@ highlight "[START] MemoryLimitTest $EXTRA_TITLE" clustername="memlimittest" info "Creating cluster $clustername..." -$EXE cluster create $clustername --servers-memory 1g --agents 1 --agents-memory 1.5g || failed "could not create cluster $clustername" +$EXE cluster create $clustername --timeout 360s --servers-memory 1g --agents 1 --agents-memory 1.5g || failed "could not create cluster $clustername" info "Checking we have access to the cluster..." 
check_clusters "$clustername" || failed "error checking cluster" diff --git a/tests/test_node_edit.sh b/tests/test_node_edit.sh index b9a5de04..80f896d3 100755 --- a/tests/test_node_edit.sh +++ b/tests/test_node_edit.sh @@ -18,7 +18,7 @@ newPortMappingHostPort="3333" newPortMappingContainerPort="4444" info "Creating cluster $clustername..." -$EXE cluster create $clustername --port "$existingPortMappingHostPort:$existingPortMappingContainerPort@loadbalancer" || failed "could not create cluster $clustername" +$EXE cluster create $clustername --timeout 360s --port "$existingPortMappingHostPort:$existingPortMappingContainerPort@loadbalancer" || failed "could not create cluster $clustername" info "Checking cluster access..." check_clusters "$clustername" || failed "error checking cluster access" From 92b7689f9dfb8e57d6364ba1dfcbef4fc7598f57 Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Fri, 18 Jun 2021 14:28:41 +0200 Subject: [PATCH 13/46] nodeAdd: do not copy ports from existing node (+ some cleanup) --- pkg/client/node.go | 86 ++++++++++++++++++++++++++-------------------- 1 file changed, 48 insertions(+), 38 deletions(-) diff --git a/pkg/client/node.go b/pkg/client/node.go index 019a901b..f981d304 100644 --- a/pkg/client/node.go +++ b/pkg/client/node.go @@ -70,36 +70,75 @@ func NodeAddToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.N node.Env = []string{} // copy labels and env vars from a similar node in the selected cluster - var chosenNode *k3d.Node + var srcNode *k3d.Node for _, existingNode := range cluster.Nodes { if existingNode.Role == node.Role { - chosenNode = existingNode + srcNode = existingNode break } } // if we didn't find a node with the same role in the cluster, just choose any other node - if chosenNode == nil { + if srcNode == nil { log.Debugf("Didn't find node with role '%s' in cluster '%s'. 
Choosing any other node (and using defaults)...", node.Role, cluster.Name) node.Cmd = k3d.DefaultRoleCmds[node.Role] for _, existingNode := range cluster.Nodes { if existingNode.Role != k3d.LoadBalancerRole { // any role except for the LoadBalancer role - chosenNode = existingNode + srcNode = existingNode break } } } // get node details - chosenNode, err = NodeGet(ctx, runtime, chosenNode) + srcNode, err = NodeGet(ctx, runtime, srcNode) if err != nil { return err } - log.Debugf("Adding node %+v \n>>> to cluster %+v\n>>> based on existing node %+v", node, cluster, chosenNode) + /* + * Sanitize Source Node + * -> remove fields that are not safe to copy as they break something down the stream + */ + + // TODO: I guess proper deduplication can be handled in a cleaner/better way or at the infofaker level at some point + for _, forbiddenMount := range util.DoNotCopyVolumeSuffices { + for i, mount := range node.Volumes { + if strings.Contains(mount, forbiddenMount) { + log.Tracef("Dropping copied volume mount %s to avoid issues...", mount) + node.Volumes = util.RemoveElementFromStringSlice(node.Volumes, i) + } + } + } + + // drop port mappings as we cannot use the same port mapping for a two nodes (port collisions) + srcNode.Ports = nat.PortMap{} + + // we cannot have two servers as init servers + if node.Role == k3d.ServerRole { + for _, forbiddenCmd := range k3d.DoNotCopyServerFlags { + for i, cmd := range srcNode.Cmd { + // cut out the '--cluster-init' flag as this should only be done by the initializing server node + if cmd == forbiddenCmd { + log.Tracef("Dropping '%s' from source node's cmd", forbiddenCmd) + srcNode.Cmd = append(srcNode.Cmd[:i], srcNode.Cmd[i+1:]...) + } + } + for i, arg := range node.Args { + // cut out the '--cluster-init' flag as this should only be done by the initializing server node + if arg == forbiddenCmd { + log.Tracef("Dropping '%s' from source node's args", forbiddenCmd) + srcNode.Args = append(srcNode.Args[:i], srcNode.Args[i+1:]...) 
+ } + } + } + } + + log.Debugf("Adding node %s to cluster %s based on existing (sanitized) node %s", node.Name, cluster.Name, srcNode.Name) + log.Tracef("Sanitized Source Node: %+v\nNew Node: %+v", srcNode, node) // fetch registry config registryConfigBytes := []byte{} - registryConfigReader, err := runtime.ReadFromNode(ctx, k3d.DefaultRegistriesFilePath, chosenNode) + registryConfigReader, err := runtime.ReadFromNode(ctx, k3d.DefaultRegistriesFilePath, srcNode) if err != nil { if !errors.Is(err, runtimeErrors.ErrRuntimeFileNotFound) { log.Warnf("Failed to read registry config from node %s: %+v", node.Name, err) @@ -117,22 +156,12 @@ func NodeAddToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.N } // merge node config of new node into existing node config - if err := mergo.MergeWithOverwrite(chosenNode, *node); err != nil { + if err := mergo.MergeWithOverwrite(srcNode, *node); err != nil { log.Errorln("Failed to merge new node config into existing node config") return err } - node = chosenNode - - // TODO: I guess proper deduplication can be handled in a cleaner/better way or at the infofaker level at some point - for _, forbiddenMount := range util.DoNotCopyVolumeSuffices { - for i, mount := range node.Volumes { - if strings.Contains(mount, forbiddenMount) { - log.Tracef("Dropping copied volume mount %s to avoid issues...", mount) - node.Volumes = util.RemoveElementFromStringSlice(node.Volumes, i) - } - } - } + node = srcNode log.Debugf("Resulting node %+v", node) @@ -151,25 +180,6 @@ func NodeAddToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.N } } - if node.Role == k3d.ServerRole { - for _, forbiddenCmd := range k3d.DoNotCopyServerFlags { - for i, cmd := range node.Cmd { - // cut out the '--cluster-init' flag as this should only be done by the initializing server node - if cmd == forbiddenCmd { - log.Debugf("Dropping '%s' from node's cmd", forbiddenCmd) - node.Cmd = append(node.Cmd[:i], node.Cmd[i+1:]...) 
- } - } - for i, arg := range node.Args { - // cut out the '--cluster-init' flag as this should only be done by the initializing server node - if arg == forbiddenCmd { - log.Debugf("Dropping '%s' from node's args", forbiddenCmd) - node.Args = append(node.Args[:i], node.Args[i+1:]...) - } - } - } - } - // add node actions if len(registryConfigBytes) != 0 { if createNodeOpts.NodeHooks == nil { From f2cb7c4e0a9e2065ffd287f607aac275dda4b51d Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Mon, 21 Jun 2021 18:53:43 +0200 Subject: [PATCH 14/46] adapt updating the loadbalancer config when adding a new node --- pkg/client/loadbalancer.go | 59 ++++++++++++++++++++++---------------- 1 file changed, 35 insertions(+), 24 deletions(-) diff --git a/pkg/client/loadbalancer.go b/pkg/client/loadbalancer.go index 2a1debcc..0ed93bd1 100644 --- a/pkg/client/loadbalancer.go +++ b/pkg/client/loadbalancer.go @@ -29,11 +29,12 @@ import ( "strings" "github.com/docker/go-connections/nat" + "github.com/go-test/deep" "github.com/rancher/k3d/v4/pkg/runtimes" "github.com/rancher/k3d/v4/pkg/types" k3d "github.com/rancher/k3d/v4/pkg/types" log "github.com/sirupsen/logrus" - "sigs.k8s.io/yaml" + "gopkg.in/yaml.v2" ) // UpdateLoadbalancerConfig updates the loadbalancer config with an updated list of servers belonging to that cluster @@ -47,25 +48,34 @@ func UpdateLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, clu return err } - // find the LoadBalancer for the target cluster - serverNodesList := []string{} - var loadbalancer *k3d.Node - for _, node := range cluster.Nodes { - if node.Role == k3d.LoadBalancerRole { // get the loadbalancer we want to update - loadbalancer = node - } else if node.Role == k3d.ServerRole { // create a list of server nodes - serverNodesList = append(serverNodesList, node.Name) - } + currentConfig, err := GetLoadbalancerConfig(ctx, runtime, cluster) + if err != nil { + return fmt.Errorf("error getting current config from loadbalancer: %w", err) + } + + 
log.Tracef("Current loadbalancer config:\n%+v", currentConfig) + + newLBConfig, err := LoadbalancerGenerateConfig(cluster) + if err != nil { + return fmt.Errorf("error generating new loadbalancer config: %w", err) } - serverNodes := strings.Join(serverNodesList, ",") - if loadbalancer == nil { - return fmt.Errorf("Failed to find loadbalancer for cluster '%s'", cluster.Name) + log.Tracef("New loadbalancer config:\n%+v", currentConfig) + + if diff := deep.Equal(currentConfig, newLBConfig); diff != nil { + log.Debugf("Updating the loadbalancer with this diff: %+v", diff) } - log.Debugf("Servers as passed to serverlb: '%s'", serverNodes) + newLbConfigYaml, err := yaml.Marshal(&newLBConfig) + if err != nil { + return fmt.Errorf("error marshalling the new loadbalancer config: %w", err) + } + log.Debugf("Writing lb config:\n%s", string(newLbConfigYaml)) + if err := runtime.WriteToNode(ctx, newLbConfigYaml, k3d.DefaultLoadbalancerConfigPath, 0744, cluster.ServerLoadBalancer); err != nil { + return fmt.Errorf("error writing new loadbalancer config to container: %w", err) + } - command := fmt.Sprintf("SERVERS=%s %s", serverNodes, "confd -onetime -backend env && nginx -s reload") - if err := runtime.ExecInNode(ctx, loadbalancer, []string{"sh", "-c", command}); err != nil { + command := "confd -onetime -backend file -file /etc/confd/values.yaml -log-level debug && nginx -s reload" + if err := runtime.ExecInNode(ctx, cluster.ServerLoadBalancer, []string{"sh", "-c", command}); err != nil { if strings.Contains(err.Error(), "host not found in upstream") { log.Warnf("Loadbalancer configuration updated, but one or more k3d nodes seem to be down, check the logs:\n%s", err.Error()) return nil @@ -76,7 +86,9 @@ func UpdateLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, clu return nil } -func GetLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.Cluster) (*types.LoadbalancerConfig, error) { +func GetLoadbalancerConfig(ctx context.Context, 
runtime runtimes.Runtime, cluster *k3d.Cluster) (types.LoadbalancerConfig, error) { + + var cfg k3d.LoadbalancerConfig if cluster.ServerLoadBalancer == nil { for _, node := range cluster.Nodes { @@ -84,7 +96,7 @@ func GetLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, cluste var err error cluster.ServerLoadBalancer, err = NodeGet(ctx, runtime, node) if err != nil { - return nil, err + return cfg, err } } } @@ -92,23 +104,22 @@ func GetLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, cluste reader, err := runtime.ReadFromNode(ctx, types.DefaultLoadbalancerConfigPath, cluster.ServerLoadBalancer) if err != nil { - return &k3d.LoadbalancerConfig{}, err + return cfg, err } defer reader.Close() file, err := ioutil.ReadAll(reader) if err != nil { - return nil, err + return cfg, err } file = bytes.Trim(file[512:], "\x00") // trim control characters, etc. - currentConfig := &types.LoadbalancerConfig{} - if err := yaml.Unmarshal(file, currentConfig); err != nil { - return nil, err + if err := yaml.Unmarshal(file, &cfg); err != nil { + return cfg, err } - return currentConfig, nil + return cfg, nil } func LoadbalancerGenerateConfig(cluster *k3d.Cluster) (k3d.LoadbalancerConfig, error) { From 97d8d085fd26efbeecd5aa7564941bd9efdf827c Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Mon, 21 Jun 2021 20:14:31 +0200 Subject: [PATCH 15/46] proxy: use new confd release with fixed file watcher --- proxy/Dockerfile | 2 +- proxy/Makefile | 2 +- proxy/conf.d/nginx.toml | 4 +++- proxy/nginx-proxy | 18 ++++++++++++++---- 4 files changed, 19 insertions(+), 7 deletions(-) diff --git a/proxy/Dockerfile b/proxy/Dockerfile index 105e89c4..7885f15e 100644 --- a/proxy/Dockerfile +++ b/proxy/Dockerfile @@ -1,6 +1,6 @@ FROM nginx:1.19-alpine ARG CONFD_REPO=iwilltry42/confd -ARG CONFD_VERSION=0.16.1 +ARG CONFD_VERSION=0.17.0-rc.0 ARG OS=linux ARG ARCH=amd64 RUN echo "Building for '${OS}/${ARCH}'..." 
\ diff --git a/proxy/Makefile b/proxy/Makefile index 6555f6ae..53b943de 100644 --- a/proxy/Makefile +++ b/proxy/Makefile @@ -1,5 +1,5 @@ .PHONY: test test: - docker build . -t rancher/k3d-proxy:dev + docker build . -t rancher/k3d-proxy:dev --no-cache docker run --rm -v $(shell pwd)/test/portmap.yaml:/etc/confd/values.yaml rancher/k3d-proxy:dev \ No newline at end of file diff --git a/proxy/conf.d/nginx.toml b/proxy/conf.d/nginx.toml index 96805409..a82534b3 100644 --- a/proxy/conf.d/nginx.toml +++ b/proxy/conf.d/nginx.toml @@ -4,4 +4,6 @@ dest = "/etc/nginx/nginx.conf" keys = [ "ports", "settings" -] \ No newline at end of file +] +check_cmd = "/usr/sbin/nginx -T -c {{.src}}" +reload_cmd = "/usr/sbin/nginx -s reload" diff --git a/proxy/nginx-proxy b/proxy/nginx-proxy index 1a1cd53b..33fb5f87 100755 --- a/proxy/nginx-proxy +++ b/proxy/nginx-proxy @@ -1,14 +1,24 @@ #!/bin/sh -# Run confd set -e -confd -onetime -backend file -file /etc/confd/values.yaml -log-level debug -set +e + +# Config Options +INIT_CONFIG_MAX_RETRIES=3 + +# Run confd +for i in $(seq 1 $INIT_CONFIG_MAX_RETRIES); do + echo "[$(date -Iseconds)] creating initial nginx config (try $i/$INIT_CONFIG_MAX_RETRIES)" + confd -onetime -backend file -file /etc/confd/values.yaml -log-level debug -sync-only + sleep 2 +done # Output Configuration echo "===== Initial nginx configuration =====" -cat /etc/nginx/nginx.conf +nginx -T -c /etc/nginx/nginx.conf echo "=======================================" +# Start confd in watch mode (every second) +confd -watch -backend file -file /etc/confd/values.yaml -log-level debug & + # Start nginx nginx -g 'daemon off;' \ No newline at end of file From 49451df1f728978deb31b58aa8b530746f0027ab Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Mon, 21 Jun 2021 20:34:55 +0200 Subject: [PATCH 16/46] loadbalancer: use auto-reload for confd (file watcher) --- pkg/client/loadbalancer.go | 10 +--------- proxy/Dockerfile | 1 + 2 files changed, 2 insertions(+), 9 deletions(-) diff --git 
a/pkg/client/loadbalancer.go b/pkg/client/loadbalancer.go index 0ed93bd1..efac0d33 100644 --- a/pkg/client/loadbalancer.go +++ b/pkg/client/loadbalancer.go @@ -26,7 +26,6 @@ import ( "context" "fmt" "io/ioutil" - "strings" "github.com/docker/go-connections/nat" "github.com/go-test/deep" @@ -74,14 +73,7 @@ func UpdateLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, clu return fmt.Errorf("error writing new loadbalancer config to container: %w", err) } - command := "confd -onetime -backend file -file /etc/confd/values.yaml -log-level debug && nginx -s reload" - if err := runtime.ExecInNode(ctx, cluster.ServerLoadBalancer, []string{"sh", "-c", command}); err != nil { - if strings.Contains(err.Error(), "host not found in upstream") { - log.Warnf("Loadbalancer configuration updated, but one or more k3d nodes seem to be down, check the logs:\n%s", err.Error()) - return nil - } - return err - } + // TODO: check if loadbalancer is running fine after auto-applying the change return nil } diff --git a/proxy/Dockerfile b/proxy/Dockerfile index 7885f15e..3ffee0a3 100644 --- a/proxy/Dockerfile +++ b/proxy/Dockerfile @@ -1,4 +1,5 @@ FROM nginx:1.19-alpine +# TODO:_ consider switching to https://github.com/abtreece/confd to not maintain a custom fork anymore ARG CONFD_REPO=iwilltry42/confd ARG CONFD_VERSION=0.17.0-rc.0 ARG OS=linux From 2b059962c4b228f9de1d95c3ca06e058947362ba Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Mon, 21 Jun 2021 21:00:26 +0200 Subject: [PATCH 17/46] node/edit: use new loadbalancer config file for ports update --- pkg/client/node.go | 58 ++++++++++++++++++++++------------------------ 1 file changed, 28 insertions(+), 30 deletions(-) diff --git a/pkg/client/node.go b/pkg/client/node.go index f981d304..a824d4c7 100644 --- a/pkg/client/node.go +++ b/pkg/client/node.go @@ -34,6 +34,7 @@ import ( "time" copystruct "github.com/mitchellh/copystructure" + "gopkg.in/yaml.v2" "github.com/docker/go-connections/nat" dockerunits 
"github.com/docker/go-units" @@ -691,40 +692,37 @@ func NodeEdit(ctx context.Context, runtime runtimes.Runtime, existingNode, chang // --- Loadbalancer specifics --- if result.Role == k3d.LoadBalancerRole { - nodeEditApplyLBSpecifics(ctx, result) - } - - // replace existing node - return NodeReplace(ctx, runtime, existingNode, result) - -} + cluster, err := ClusterGet(ctx, runtime, &k3d.Cluster{Name: existingNode.RuntimeLabels[k3d.LabelClusterName]}) + if err != nil { + return fmt.Errorf("error updating loadbalancer config: %w", err) + } + cluster.ServerLoadBalancer = result + lbConfig, err := LoadbalancerGenerateConfig(cluster) + if err != nil { + return fmt.Errorf("error generating loadbalancer config: %v", err) + } -func nodeEditApplyLBSpecifics(ctx context.Context, lbNode *k3d.Node) { - tcp_ports := []string{} - udp_ports := []string{} - for index, env := range lbNode.Env { - if strings.HasPrefix(env, "PORTS=") || strings.HasPrefix(env, "UDP_PORTS=") { - // Remove matching environment variable from slice (does not preserve order) - lbNode.Env[index] = lbNode.Env[len(lbNode.Env)-1] // copy last element to index of matching env - lbNode.Env[len(lbNode.Env)-1] = "" // remove last element - lbNode.Env = lbNode.Env[:len(lbNode.Env)-1] // truncate + // prepare to write config to lb container + configyaml, err := yaml.Marshal(lbConfig) + if err != nil { + return err } - } - for port := range lbNode.Ports { - switch port.Proto() { - case "tcp": - tcp_ports = append(tcp_ports, port.Port()) - break - case "udp": - udp_ports = append(udp_ports, port.Port()) - break - default: - log.Warnf("Unknown port protocol %s for port %s", port.Proto(), port.Port()) + writeLbConfigAction := k3d.NodeHook{ + Stage: k3d.LifecycleStagePreStart, + Action: actions.WriteFileAction{ + Runtime: runtime, + Dest: k3d.DefaultLoadbalancerConfigPath, + Mode: 0744, + Content: configyaml, + }, } + + result.HookActions = append(result.HookActions, writeLbConfigAction) } - lbNode.Env = 
append(lbNode.Env, fmt.Sprintf("PORTS=%s", strings.Join(tcp_ports, ","))) - lbNode.Env = append(lbNode.Env, fmt.Sprintf("UDP_PORTS=%s", strings.Join(udp_ports, ","))) + + // replace existing node + return NodeReplace(ctx, runtime, existingNode, result) } func NodeReplace(ctx context.Context, runtime runtimes.Runtime, old, new *k3d.Node) error { @@ -755,7 +753,7 @@ func NodeReplace(ctx context.Context, runtime runtimes.Runtime, old, new *k3d.No // start new node log.Infof("Starting new node %s...", new.Name) - if err := NodeStart(ctx, runtime, new, k3d.NodeStartOpts{Wait: true}); err != nil { + if err := NodeStart(ctx, runtime, new, k3d.NodeStartOpts{Wait: true, NodeHooks: new.HookActions}); err != nil { if err := NodeDelete(ctx, runtime, new, k3d.NodeDeleteOpts{SkipLBUpdate: true}); err != nil { return fmt.Errorf("Failed to start new node. Also failed to rollback: %+v", err) } From d10ed031d833d52d4fec1e2cfe6b9e9048874449 Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Tue, 22 Jun 2021 19:00:33 +0200 Subject: [PATCH 18/46] nodeWaitForLogMessage: log found target line when on >= trace level logging --- pkg/client/node.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/pkg/client/node.go b/pkg/client/node.go index a824d4c7..2ba1c0b3 100644 --- a/pkg/client/node.go +++ b/pkg/client/node.go @@ -604,6 +604,14 @@ func NodeWaitForLogMessage(ctx context.Context, runtime runtimes.Runtime, node * } // check if we can find the specified line in the log if nRead > 0 && strings.Contains(output, message) { + if log.GetLevel() >= log.TraceLevel { + temp := strings.Split(output, "\n") + for _, l := range temp { + if strings.Contains(l, message) { + log.Tracef("Found target log line: `%s`", l) + } + } + } break } From 5e2c03f586d2e9b44424b683c1283534c43cb4a0 Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Tue, 22 Jun 2021 19:46:44 +0200 Subject: [PATCH 19/46] updatelbconfig: check for log output to see if the update succeeded and give proper info --- 
pkg/client/loadbalancer.go | 32 +++++++++++++++++++++++++++++++- pkg/client/node.go | 15 ++++++++------- 2 files changed, 39 insertions(+), 8 deletions(-) diff --git a/pkg/client/loadbalancer.go b/pkg/client/loadbalancer.go index efac0d33..ec43d9fd 100644 --- a/pkg/client/loadbalancer.go +++ b/pkg/client/loadbalancer.go @@ -24,8 +24,10 @@ package client import ( "bytes" "context" + "errors" "fmt" "io/ioutil" + "time" "github.com/docker/go-connections/nat" "github.com/go-test/deep" @@ -36,6 +38,11 @@ import ( "gopkg.in/yaml.v2" ) +var ( + LBConfigErrHostNotFound = errors.New("lbconfig: host not found") + LBConfigErrFailedTest = errors.New("lbconfig: failed to test") +) + // UpdateLoadbalancerConfig updates the loadbalancer config with an updated list of servers belonging to that cluster func UpdateLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.Cluster) error { @@ -69,11 +76,34 @@ func UpdateLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, clu return fmt.Errorf("error marshalling the new loadbalancer config: %w", err) } log.Debugf("Writing lb config:\n%s", string(newLbConfigYaml)) + startTime := time.Now().Truncate(time.Second).UTC() + log.Debugf("timestamp: %s", startTime.Format("2006-01-02T15:04:05.999999999Z")) if err := runtime.WriteToNode(ctx, newLbConfigYaml, k3d.DefaultLoadbalancerConfigPath, 0744, cluster.ServerLoadBalancer); err != nil { return fmt.Errorf("error writing new loadbalancer config to container: %w", err) } - // TODO: check if loadbalancer is running fine after auto-applying the change + successCtx, successCtxCancel := context.WithDeadline(ctx, time.Now().Add(5*time.Second)) + defer successCtxCancel() + err = NodeWaitForLogMessage(successCtx, runtime, cluster.ServerLoadBalancer, k3d.ReadyLogMessageByRole[k3d.LoadBalancerRole], startTime) + if err != nil { + if errors.Is(err, context.DeadlineExceeded) { + failureCtx, failureCtxCancel := context.WithDeadline(ctx, time.Now().Add(5*time.Second)) + 
defer failureCtxCancel() + err = NodeWaitForLogMessage(failureCtx, runtime, cluster.ServerLoadBalancer, "host not found in upstream", startTime) + if err != nil { + log.Warnf("Failed to check if the loadbalancer was configured correctly or if it broke. Please check it manually or try again: %v", err) + return LBConfigErrFailedTest + } else { + log.Warnln("Failed to configure loadbalancer because one of the nodes seems to be down! Run `k3d node list` to see which one it could be.") + return LBConfigErrHostNotFound + } + } else { + log.Warnf("Failed to ensure that loadbalancer was configured correctly. Please check it manually or try again: %v", err) + return LBConfigErrFailedTest + } + } + + time.Sleep(1 * time.Second) // waiting for a second, to avoid issues with too fast lb updates which would screw up the log waits return nil } diff --git a/pkg/client/node.go b/pkg/client/node.go index 2ba1c0b3..8e644b79 100644 --- a/pkg/client/node.go +++ b/pkg/client/node.go @@ -208,8 +208,9 @@ func NodeAddToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.N // if it's a server node, then update the loadbalancer configuration if node.Role == k3d.ServerRole { if err := UpdateLoadbalancerConfig(ctx, runtime, cluster); err != nil { - log.Errorln("Failed to update cluster loadbalancer") - return err + if !errors.Is(err, LBConfigErrHostNotFound) { + return fmt.Errorf("error updating loadbalancer: %w", err) + } } } @@ -496,8 +497,9 @@ func NodeDelete(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, o // if it's a server node, then update the loadbalancer configuration if node.Role == k3d.ServerRole { if err := UpdateLoadbalancerConfig(ctx, runtime, cluster); err != nil { - log.Errorln("Failed to update cluster loadbalancer") - return err + if !errors.Is(err, LBConfigErrHostNotFound) { + return fmt.Errorf("Failed to update cluster loadbalancer: %w", err) + } } } } @@ -577,7 +579,7 @@ func NodeWaitForLogMessage(ctx context.Context, runtime 
runtimes.Runtime, node * if ok { log.Debugf("NodeWaitForLogMessage: Context Deadline (%s) > Current Time (%s)", d, time.Now()) } - return fmt.Errorf("Context deadline exceeded while waiting for log message '%s' of node %s", message, node.Name) + return fmt.Errorf("Context deadline exceeded while waiting for log message '%s' of node %s: %w", message, node.Name, ctx.Err()) } return ctx.Err() default: @@ -589,8 +591,7 @@ func NodeWaitForLogMessage(ctx context.Context, runtime runtimes.Runtime, node * if out != nil { out.Close() } - log.Errorf("Failed waiting for log message '%s' from node '%s'", message, node.Name) - return err + return fmt.Errorf("Failed waiting for log message '%s' from node '%s': %w", message, node.Name, err) } defer out.Close() From 6223601cdfb2f8ba825252ff043e130b2849c81c Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Tue, 22 Jun 2021 20:15:09 +0200 Subject: [PATCH 20/46] update changelog --- CHANGELOG.md | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 86cb9a96..f41f30b7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,28 @@ # Changelog +## v5.0.0 + +### Fixes + +- cleaned up and properly sorted the sanitization of existing resources used to create new nodes (#638) + +### Features & Enhancements + +- new command: `k3d node edit` to edit existing nodes (#615) + - currently only allows `k3d node edit NODE --port-add HOSTPORT:CONTAINERPORT` for the serverlb/loadbalancer to add new ports + - pkg: new `NodeEdit` function +- new (hidden) command: `k3d debug` with some options for debugging k3d resources (#638) + - e.g. 
`k3d debug loadbalancer get-config` to get the current loadbalancer configuration +- loadbalancer / k3d-proxy (#638) + - updated fork of `confd` to make usage of the file backend including a file watcher for auto-reloads + - this also checks the config before applying it, so the lb doesn't crash on a faulty config + - updating the loadbalancer writes the new config file and also checks if everything's going fine afterwards +- helper images can now be set explicitly via environment variables: `K3D_IMAGE_LOADBALANCER` & `K3D_IMAGE_TOOLS` (#638) + +### Misc + +- tests/e2e: timeouts everywhere to avoid killing DroneCI (#638) + ## v4.4.6 ### Fixes From cc544a273e1dffe5455bfdcd79a9267feed3551f Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Tue, 22 Jun 2021 20:56:40 +0200 Subject: [PATCH 21/46] some really final log output after creating/deleting nodes --- cmd/node/nodeCreate.go | 1 + cmd/node/nodeDelete.go | 1 + pkg/client/node.go | 1 + 3 files changed, 3 insertions(+) diff --git a/cmd/node/nodeCreate.go b/cmd/node/nodeCreate.go index 8169170c..3ded6faa 100644 --- a/cmd/node/nodeCreate.go +++ b/cmd/node/nodeCreate.go @@ -55,6 +55,7 @@ func NewCmdNodeCreate() *cobra.Command { log.Errorf("Failed to add nodes to cluster '%s'", cluster.Name) log.Fatalln(err) } + log.Infof("Successfully created %d node(s)!", len(nodes)) }, } diff --git a/cmd/node/nodeDelete.go b/cmd/node/nodeDelete.go index 91c50764..ad1bb72b 100644 --- a/cmd/node/nodeDelete.go +++ b/cmd/node/nodeDelete.go @@ -59,6 +59,7 @@ func NewCmdNodeDelete() *cobra.Command { log.Fatalln(err) } } + log.Infof("Successfully deleted %d node(s)!", len(nodes)) } }, } diff --git a/pkg/client/node.go b/pkg/client/node.go index 8e644b79..ef0dce2f 100644 --- a/pkg/client/node.go +++ b/pkg/client/node.go @@ -207,6 +207,7 @@ func NodeAddToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.N // if it's a server node, then update the loadbalancer configuration if node.Role == k3d.ServerRole { + log.Infoln("Updating 
loadbalancer config to include new server node(s)") if err := UpdateLoadbalancerConfig(ctx, runtime, cluster); err != nil { if !errors.Is(err, LBConfigErrHostNotFound) { return fmt.Errorf("error updating loadbalancer: %w", err) From 574148685de4db062933aef5c6711ae4c57d85e2 Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Tue, 22 Jun 2021 21:08:57 +0200 Subject: [PATCH 22/46] nodeCreate: remove dead code and parallelize adding nodes to the cluster completely --- CHANGELOG.md | 3 +++ cmd/node/nodeCreate.go | 2 +- pkg/client/loadbalancer.go | 2 +- pkg/client/node.go | 23 +++++------------------ 4 files changed, 10 insertions(+), 20 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f41f30b7..7c41516b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,10 +18,13 @@ - this also checks the config before applying it, so the lb doesn't crash on a faulty config - updating the loadbalancer writes the new config file and also checks if everything's going fine afterwards - helper images can now be set explicitly via environment variables: `K3D_IMAGE_LOADBALANCER` & `K3D_IMAGE_TOOLS` (#638) +- concurrently add new nodes to an existing cluster (remove some dumb code) (#640) + - `--wait` is now the default for `k3d node create` ### Misc - tests/e2e: timeouts everywhere to avoid killing DroneCI (#638) +- logs: really final output when creating/deleting nodes (so far, we were not outputting a final success message and the process was still doing stuff) (#640) ## v4.4.6 diff --git a/cmd/node/nodeCreate.go b/cmd/node/nodeCreate.go index 3ded6faa..76151033 100644 --- a/cmd/node/nodeCreate.go +++ b/cmd/node/nodeCreate.go @@ -73,7 +73,7 @@ func NewCmdNodeCreate() *cobra.Command { cmd.Flags().StringP("image", "i", fmt.Sprintf("%s:%s", k3d.DefaultK3sImageRepo, version.GetK3sVersion(false)), "Specify k3s image used for the node(s)") cmd.Flags().String("memory", "", "Memory limit imposed on the node [From docker]") - cmd.Flags().BoolVar(&createNodeOpts.Wait, "wait", false, "Wait for 
the node(s) to be ready before returning.") + cmd.Flags().BoolVar(&createNodeOpts.Wait, "wait", true, "Wait for the node(s) to be ready before returning.") cmd.Flags().DurationVar(&createNodeOpts.Timeout, "timeout", 0*time.Second, "Maximum waiting time for '--wait' before canceling/returning.") cmd.Flags().StringSliceP("runtime-label", "", []string{}, "Specify container runtime labels in format \"foo=bar\"") diff --git a/pkg/client/loadbalancer.go b/pkg/client/loadbalancer.go index ec43d9fd..84843c0a 100644 --- a/pkg/client/loadbalancer.go +++ b/pkg/client/loadbalancer.go @@ -77,7 +77,6 @@ func UpdateLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, clu } log.Debugf("Writing lb config:\n%s", string(newLbConfigYaml)) startTime := time.Now().Truncate(time.Second).UTC() - log.Debugf("timestamp: %s", startTime.Format("2006-01-02T15:04:05.999999999Z")) if err := runtime.WriteToNode(ctx, newLbConfigYaml, k3d.DefaultLoadbalancerConfigPath, 0744, cluster.ServerLoadBalancer); err != nil { return fmt.Errorf("error writing new loadbalancer config to container: %w", err) } @@ -102,6 +101,7 @@ func UpdateLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, clu return LBConfigErrFailedTest } } + log.Infof("Successfully configured loadbalancer %s!", cluster.ServerLoadBalancer.Name) time.Sleep(1 * time.Second) // waiting for a second, to avoid issues with too fast lb updates which would screw up the log waits diff --git a/pkg/client/node.go b/pkg/client/node.go index ef0dce2f..f0e00482 100644 --- a/pkg/client/node.go +++ b/pkg/client/node.go @@ -228,26 +228,13 @@ func NodeAddToClusterMulti(ctx context.Context, runtime runtimes.Runtime, nodes nodeWaitGroup, ctx := errgroup.WithContext(ctx) for _, node := range nodes { - if err := NodeAddToCluster(ctx, runtime, node, cluster, createNodeOpts); err != nil { - return err - } - if createNodeOpts.Wait { - currentNode := node - nodeWaitGroup.Go(func() error { - log.Debugf("Starting to wait for node '%s'", 
currentNode.Name) - readyLogMessage := k3d.ReadyLogMessageByRole[currentNode.Role] - if readyLogMessage != "" { - return NodeWaitForLogMessage(ctx, runtime, currentNode, readyLogMessage, time.Time{}) - } - log.Warnf("NodeAddToClusterMulti: Set to wait for node %s to get ready, but there's no target log message defined", currentNode.Name) - return nil - }) - } + currentNode := node + nodeWaitGroup.Go(func() error { + return NodeAddToCluster(ctx, runtime, currentNode, cluster, createNodeOpts) + }) } if err := nodeWaitGroup.Wait(); err != nil { - log.Errorln("Failed to bring up all nodes in time. Check the logs:") - log.Errorf(">>> %+v", err) - return fmt.Errorf("Failed to add nodes") + return fmt.Errorf("Failed to add one or more nodes: %w", err) } return nil From d0deee3b183c89e35e43f706db2b8004eacd5c1e Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Tue, 22 Jun 2021 21:49:16 +0200 Subject: [PATCH 23/46] update changelog to include already merged prs --- CHANGELOG.md | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7c41516b..514dc676 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,11 +20,29 @@ - helper images can now be set explicitly via environment variables: `K3D_IMAGE_LOADBALANCER` & `K3D_IMAGE_TOOLS` (#638) - concurrently add new nodes to an existing cluster (remove some dumb code) (#640) - `--wait` is now the default for `k3d node create` +- normalized flag usage for k3s and runtime (#598, @ejose19) + - rename `k3d cluster create --label` to `k3d cluster create --runtime-label` (as it's labelling the node on runtime level, e.g. 
docker) + - config option moved to `options.runtime.labels` + - add `k3d cluster create --k3s-node-label` to add Kubernetes node labels via k3s flag (#584, @developer-guy, @ejose, @dentrax) + - new config option `options.k3s.nodeLabels` + - the same for `k3d node create` +- improved config file handling (#605) + - new version `v1alpha3` + - warning when using outdated version + - validation dynamically based on provided config apiVersion + - new default for `k3d config init` + - new command `k3d config migrate INPUT [OUTPUT]` to migrate config files between versions + - currently supported migration `v1alpha2` -> `v1alpha3` + - pkg: new `Config` interface type to support new generic `FromViper` config file parsing +- changed flags `--k3s-server-arg` & `--k3s-agent-arg` into `--k3s-arg` with nodefilter support (#605) + - new config path `options.k3s.extraArgs` ### Misc - tests/e2e: timeouts everywhere to avoid killing DroneCI (#638) - logs: really final output when creating/deleting nodes (so far, we were not outputting a final success message and the process was still doing stuff) (#640) +- tests/e2e: add tests for v1alpha2 to v1alpha3 migration +- docs: use v1alpha3 config version ## v4.4.6 From 7db9275a5b04160f58635e8fe76586fa8cb9575e Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Thu, 24 Jun 2021 10:53:39 +0200 Subject: [PATCH 24/46] clusterCreate: use tempfile with expanded env vars as viper input file --- CHANGELOG.md | 1 + cmd/cluster/clusterCreate.go | 25 +++++++++++++++++++++++-- tests/assets/config_test_simple.yaml | 2 +- 3 files changed, 25 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 514dc676..e86ce33f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -36,6 +36,7 @@ - pkg: new `Config` interface type to support new generic `FromViper` config file parsing - changed flags `--k3s-server-arg` & `--k3s-agent-arg` into `--k3s-arg` with nodefilter support (#605) - new config path `options.k3s.extraArgs` +- config file: environment 
variables (`$VAR`, `${VAR}` will be expanded unconditionally) (#643) ### Misc diff --git a/cmd/cluster/clusterCreate.go b/cmd/cluster/clusterCreate.go index 89a2fbae..7e977f5a 100644 --- a/cmd/cluster/clusterCreate.go +++ b/cmd/cluster/clusterCreate.go @@ -24,7 +24,9 @@ package cluster import ( "fmt" + "io/ioutil" "os" + "path/filepath" "runtime" "strings" "time" @@ -72,12 +74,31 @@ func initConfig() { // Set config file, if specified if configFile != "" { - cfgViper.SetConfigFile(configFile) if _, err := os.Stat(configFile); err != nil { log.Fatalf("Failed to stat config file %s: %+v", configFile, err) } + // create temporary file to expand environment variables in the config without writing that back to the original file + // we're doing it here, because this happens just before absolutely all other processing + tmpfile, err := os.CreateTemp(os.TempDir(), fmt.Sprintf("k3d-config-tmp-%s", filepath.Base(configFile))) + if err != nil { + log.Fatalf("error creating temp copy of configfile %s for variable expansion: %v", configFile, err) + } + defer tmpfile.Close() + + originalcontent, err := ioutil.ReadFile(configFile) + if err != nil { + log.Fatalf("error reading config file %s: %v", configFile, err) + } + expandedcontent := os.ExpandEnv(string(originalcontent)) + if _, err := tmpfile.WriteString(expandedcontent); err != nil { + log.Fatalf("error writing expanded config file contents to temp file %s: %v", tmpfile.Name(), err) + } + + // use temp file with expanded variables + cfgViper.SetConfigFile(tmpfile.Name()) + // try to read config into memory (viper map structure) if err := cfgViper.ReadInConfig(); err != nil { if _, ok := err.(viper.ConfigFileNotFoundError); ok { @@ -96,7 +117,7 @@ func initConfig() { log.Fatalf("Schema Validation failed for config file %s: %+v", configFile, err) } - log.Infof("Using config file %s (%s#%s)", cfgViper.ConfigFileUsed(), strings.ToLower(cfgViper.GetString("apiVersion")), strings.ToLower(cfgViper.GetString("kind"))) + 
log.Infof("Using config file %s (%s#%s)", configFile, strings.ToLower(cfgViper.GetString("apiVersion")), strings.ToLower(cfgViper.GetString("kind"))) } if log.GetLevel() >= log.DebugLevel { c, _ := yaml.Marshal(cfgViper.AllSettings()) diff --git a/tests/assets/config_test_simple.yaml b/tests/assets/config_test_simple.yaml index 75588924..0d4440fa 100755 --- a/tests/assets/config_test_simple.yaml +++ b/tests/assets/config_test_simple.yaml @@ -8,7 +8,7 @@ kubeAPI: hostPort: "6446" image: rancher/k3s:latest volumes: - - volume: /my/path:/some/path + - volume: $HOME:/some/path nodeFilters: - all ports: From 1d72983a024ace5c913fff881c8a6e4ae06b0ca6 Mon Sep 17 00:00:00 2001 From: Thorsten Klein Date: Wed, 16 Jun 2021 15:59:43 +0200 Subject: [PATCH 25/46] [FEATURE] add ability to add ports to an existing loadbalancer (#615) --- go.mod | 1 - go.sum | 1 - pkg/client/node.go | 7 ++ vendor/golang.org/x/sync/AUTHORS | 3 - vendor/golang.org/x/sync/CONTRIBUTORS | 3 - vendor/golang.org/x/sync/LICENSE | 27 -------- vendor/golang.org/x/sync/PATENTS | 22 ------- vendor/golang.org/x/sync/errgroup/errgroup.go | 66 ------------------- vendor/modules.txt | 3 - 9 files changed, 7 insertions(+), 126 deletions(-) delete mode 100644 vendor/golang.org/x/sync/AUTHORS delete mode 100644 vendor/golang.org/x/sync/CONTRIBUTORS delete mode 100644 vendor/golang.org/x/sync/LICENSE delete mode 100644 vendor/golang.org/x/sync/PATENTS delete mode 100644 vendor/golang.org/x/sync/errgroup/errgroup.go diff --git a/go.mod b/go.mod index 6893fcd8..b0d20808 100644 --- a/go.mod +++ b/go.mod @@ -30,7 +30,6 @@ require ( github.com/spf13/viper v1.8.1 github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonschema v1.2.0 - golang.org/x/sync v0.0.0-20210220032951-036812b2e83c golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 // indirect golang.org/x/text v0.3.6 // indirect gopkg.in/yaml.v2 v2.4.0 diff --git a/go.sum b/go.sum index 879f84b4..6f0cbba9 100644 --- 
a/go.sum +++ b/go.sum @@ -686,7 +686,6 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20171026204733-164713f0dfce/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/pkg/client/node.go b/pkg/client/node.go index f0e00482..3a895ed7 100644 --- a/pkg/client/node.go +++ b/pkg/client/node.go @@ -34,7 +34,10 @@ import ( "time" copystruct "github.com/mitchellh/copystructure" +<<<<<<< HEAD "gopkg.in/yaml.v2" +======= +>>>>>>> 9a18c3a3 ([FEATURE] add ability to add ports to an existing loadbalancer (#615)) "github.com/docker/go-connections/nat" dockerunits "github.com/docker/go-units" @@ -750,7 +753,11 @@ func NodeReplace(ctx context.Context, runtime runtimes.Runtime, old, new *k3d.No // start new node log.Infof("Starting new node %s...", new.Name) +<<<<<<< HEAD if err := NodeStart(ctx, runtime, new, k3d.NodeStartOpts{Wait: true, NodeHooks: new.HookActions}); err != nil { +======= + if err := NodeStart(ctx, runtime, new, k3d.NodeStartOpts{Wait: true}); err != nil { +>>>>>>> 9a18c3a3 ([FEATURE] add ability to add ports to an existing loadbalancer (#615)) if err := NodeDelete(ctx, runtime, new, k3d.NodeDeleteOpts{SkipLBUpdate: true}); err != nil { return fmt.Errorf("Failed to start new node. 
Also failed to rollback: %+v", err) } diff --git a/vendor/golang.org/x/sync/AUTHORS b/vendor/golang.org/x/sync/AUTHORS deleted file mode 100644 index 15167cd7..00000000 --- a/vendor/golang.org/x/sync/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/sync/CONTRIBUTORS b/vendor/golang.org/x/sync/CONTRIBUTORS deleted file mode 100644 index 1c4577e9..00000000 --- a/vendor/golang.org/x/sync/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/sync/LICENSE b/vendor/golang.org/x/sync/LICENSE deleted file mode 100644 index 6a66aea5..00000000 --- a/vendor/golang.org/x/sync/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/sync/PATENTS b/vendor/golang.org/x/sync/PATENTS deleted file mode 100644 index 73309904..00000000 --- a/vendor/golang.org/x/sync/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. 
If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go deleted file mode 100644 index 9857fe53..00000000 --- a/vendor/golang.org/x/sync/errgroup/errgroup.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package errgroup provides synchronization, error propagation, and Context -// cancelation for groups of goroutines working on subtasks of a common task. -package errgroup - -import ( - "context" - "sync" -) - -// A Group is a collection of goroutines working on subtasks that are part of -// the same overall task. -// -// A zero Group is valid and does not cancel on error. -type Group struct { - cancel func() - - wg sync.WaitGroup - - errOnce sync.Once - err error -} - -// WithContext returns a new Group and an associated Context derived from ctx. -// -// The derived Context is canceled the first time a function passed to Go -// returns a non-nil error or the first time Wait returns, whichever occurs -// first. -func WithContext(ctx context.Context) (*Group, context.Context) { - ctx, cancel := context.WithCancel(ctx) - return &Group{cancel: cancel}, ctx -} - -// Wait blocks until all function calls from the Go method have returned, then -// returns the first non-nil error (if any) from them. 
-func (g *Group) Wait() error { - g.wg.Wait() - if g.cancel != nil { - g.cancel() - } - return g.err -} - -// Go calls the given function in a new goroutine. -// -// The first call to return a non-nil error cancels the group; its error will be -// returned by Wait. -func (g *Group) Go(f func() error) { - g.wg.Add(1) - - go func() { - defer g.wg.Done() - - if err := f(); err != nil { - g.errOnce.Do(func() { - g.err = err - if g.cancel != nil { - g.cancel() - } - }) - } - }() -} diff --git a/vendor/modules.txt b/vendor/modules.txt index 54f233ab..d499b5ef 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -233,9 +233,6 @@ golang.org/x/net/proxy # golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602 golang.org/x/oauth2 golang.org/x/oauth2/internal -# golang.org/x/sync v0.0.0-20210220032951-036812b2e83c -## explicit -golang.org/x/sync/errgroup # golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 ## explicit golang.org/x/sys/execabs From 9574002b38978c62543ade6f43b354e8d60ebde2 Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Mon, 28 Jun 2021 11:16:00 +0200 Subject: [PATCH 26/46] add some more portmapping work --- cmd/cluster/clusterCreate.go | 2 +- pkg/client/node.go | 14 +----- pkg/types/loadbalancer.go | 94 ++++++++++++++++++++++++++++++++++++ pkg/types/node.go | 39 +++++++++++++++ pkg/types/types.go | 42 ---------------- pkg/util/filter.go | 23 ++++++--- 6 files changed, 152 insertions(+), 62 deletions(-) create mode 100644 pkg/types/loadbalancer.go create mode 100644 pkg/types/node.go diff --git a/cmd/cluster/clusterCreate.go b/cmd/cluster/clusterCreate.go index 7e977f5a..c238a736 100644 --- a/cmd/cluster/clusterCreate.go +++ b/cmd/cluster/clusterCreate.go @@ -301,7 +301,7 @@ func NewCmdClusterCreate() *cobra.Command { cmd.Flags().StringArrayP("volume", "v", nil, "Mount volumes into the nodes (Format: `[SOURCE:]DEST[@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 -v /my/path@agent[0,1] -v /tmp/test:/tmp/other@server[0]`") _ = 
ppViper.BindPFlag("cli.volumes", cmd.Flags().Lookup("volume")) - cmd.Flags().StringArrayP("port", "p", nil, "Map ports from the node containers to the host (Format: `[HOST:][HOSTPORT:]CONTAINERPORT[/PROTOCOL][@NODEFILTER]`)\n - Example: `k3d cluster create --agents 2 -p 8080:80@agent[0] -p 8081@agent[1]`") + cmd.Flags().StringArrayP("port", "p", nil, "Map ports from the node containers (via the serverlb) to the host (Format: `[HOST:][HOSTPORT:]CONTAINERPORT[/PROTOCOL][@NODEFILTER]`)\n - Example: `k3d cluster create --agents 2 -p 8080:80@agent[0] -p 8081@agent[1]`") _ = ppViper.BindPFlag("cli.ports", cmd.Flags().Lookup("port")) cmd.Flags().StringArrayP("k3s-node-label", "", nil, "Add label to k3s node (Format: `KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 --k3s-node-label \"my.label@agent[0,1]\" --k3s-node-label \"other.label=somevalue@server[0]\"`") diff --git a/pkg/client/node.go b/pkg/client/node.go index 3a895ed7..97a2212e 100644 --- a/pkg/client/node.go +++ b/pkg/client/node.go @@ -386,19 +386,7 @@ func NodeCreate(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, c /* global node configuration (applies for any node role) */ // ### Labels ### - labels := make(map[string]string) - for k, v := range k3d.DefaultRuntimeLabels { - labels[k] = v - } - for k, v := range k3d.DefaultRuntimeLabelsVar { - labels[k] = v - } - for k, v := range node.RuntimeLabels { - labels[k] = v - } - node.RuntimeLabels = labels - // second most important: the node role label - node.RuntimeLabels[k3d.LabelRole] = string(node.Role) + node.FillRuntimeLabels() for k, v := range node.K3sNodeLabels { node.Args = append(node.Args, "--node-label", fmt.Sprintf("%s=%s", k, v)) diff --git a/pkg/types/loadbalancer.go b/pkg/types/loadbalancer.go new file mode 100644 index 00000000..4145440c --- /dev/null +++ b/pkg/types/loadbalancer.go @@ -0,0 +1,94 @@ +/* +Copyright © 2020-2021 The k3d Author(s) + +Permission is hereby granted, free of charge, to 
any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +*/ +package types + +/* DESCRIPTION + * The Loadbalancer is a customized NGINX container running side-by-side with the cluster, NOT INSIDE IT. + * It is used to do plain proxying of tcp/udp ports to the k3d node containers. + * One advantage of this approach is, that we can add new ports while the cluster is still running by re-creating + * the loadbalancer and adding the new port config in the NGINX config. As the loadbalancer doesn't hold any state + * (apart from the config file), it can easily be re-created in just a few seconds. 
+ */ + +/* + * Loadbalancer Definition + */ + +type Loadbalancer struct { + Node Node `mapstructure:",squash" yaml:",inline"` // the underlying node + Config LoadbalancerConfig `mapstructure:"config" yaml:"config"` // its configuration +} + +func NewLoadbalancer() *Loadbalancer { + return &Loadbalancer{ + Node: Node{ + Role: LoadBalancerRole, + Image: GetLoadbalancerImage(), + }, + } +} + +/* + * Loadbalancer Configuration + */ + +/* LoadbalancerConfig defines the coarse file structure to configure the k3d-proxy + * Example: + * ports: + * 1234.tcp: + * - k3d-k3s-default-server-0 + * - k3d-k3s-default-server-1 + * 4321.udp: + * - k3d-k3s-default-agent-0 + * - k3d-k3s-default-agent-1 + */ +type LoadbalancerConfig struct { + Ports map[string][]string `yaml:"ports"` + Settings LoadBalancerSettings `yaml:"settings"` +} + +type LoadBalancerSettings struct { + WorkerProcesses int `yaml:"workerProcesses"` +} + +const ( + DefaultLoadbalancerConfigPath = "/etc/confd/values.yaml" + DefaultLoadbalancerWorkerProcesses = 1024 +) + +type LoadbalancerCreateOpts struct { + Labels map[string]string +} + +/* + * Helper Functions + */ + +// HasLoadBalancer returns true if cluster has a loadbalancer node +func (c *Cluster) HasLoadBalancer() bool { + for _, node := range c.Nodes { + if node.Role == LoadBalancerRole { + return true + } + } + return false +} diff --git a/pkg/types/node.go b/pkg/types/node.go new file mode 100644 index 00000000..d1a6c55b --- /dev/null +++ b/pkg/types/node.go @@ -0,0 +1,39 @@ +/* +Copyright © 2020-2021 The k3d Author(s) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + 
+The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +*/ +package types + +func (node *Node) FillRuntimeLabels() { + labels := make(map[string]string) + for k, v := range DefaultRuntimeLabels { + labels[k] = v + } + for k, v := range DefaultRuntimeLabelsVar { + labels[k] = v + } + for k, v := range node.RuntimeLabels { + labels[k] = v + } + node.RuntimeLabels = labels + // second most important: the node role label + node.RuntimeLabels[LabelRole] = string(node.Role) + +} diff --git a/pkg/types/types.go b/pkg/types/types.go index f96377cf..8e2f946c 100644 --- a/pkg/types/types.go +++ b/pkg/types/types.go @@ -296,16 +296,6 @@ func (c *Cluster) AgentCountRunning() (int, int) { return agentCount, agentsRunning } -// HasLoadBalancer returns true if cluster has a loadbalancer node -func (c *Cluster) HasLoadBalancer() bool { - for _, node := range c.Nodes { - if node.Role == LoadBalancerRole { - return true - } - } - return false -} - type NodeIP struct { IP netaddr.IP Static bool @@ -412,35 +402,3 @@ type RegistryExternal struct { Host string `yaml:"host" json:"host"` Port string `yaml:"port" json:"port"` } - -/* - * Loadbalancer - */ - -/* LoadbalancerConfig defines the coarse file structure to configure the k3d-proxy - * Example: - * ports: - * 1234.tcp: - * - k3d-k3s-default-server-0 - * - k3d-k3s-default-server-1 - * 4321.udp: - * - k3d-k3s-default-agent-0 - * - k3d-k3s-default-agent-1 - */ -type LoadbalancerConfig struct { - Ports 
map[string][]string `yaml:"ports"` - Settings LoadBalancerSettings `yaml:"settings"` -} - -type LoadBalancerSettings struct { - WorkerProcesses int `yaml:"workerProcesses"` -} - -const ( - DefaultLoadbalancerConfigPath = "/etc/confd/values.yaml" - DefaultLoadbalancerWorkerProcesses = 1024 -) - -type LoadbalancerCreateOpts struct { - Labels map[string]string -} diff --git a/pkg/util/filter.go b/pkg/util/filter.go index 99bcab9f..d9f80a7a 100644 --- a/pkg/util/filter.go +++ b/pkg/util/filter.go @@ -31,15 +31,26 @@ import ( log "github.com/sirupsen/logrus" ) +type NodeFilterSuffix string + +const ( + NodeFilterSuffixNone NodeFilterSuffix = "none" + NodeFilterMapKeyAll = "all" +) + // Regexp pattern to match node filters -var filterRegexp = regexp.MustCompile(`^(?Pserver|agent|loadbalancer|all)(?P\[(?P(?P(\d+,?)+)|(?P\d*:\d*)|(?P\*))\])?$`) +var NodeFilterRegexp = regexp.MustCompile(`^(?Pserver|servers|agent|agents|loadbalancer|all)(?P:(?P(?P(\d+,?)+)|(?P\d*-\d*)|(?P\*)))?(?P:[[:alpha:]]+)?$`) // FilterNodes takes a string filter to return a filtered list of nodes -func FilterNodes(nodes []*k3d.Node, filters []string) ([]*k3d.Node, error) { +func FilterNodes(nodes []*k3d.Node, filters []string) (map[string][]*k3d.Node, error) { + + result := map[string][]*k3d.Node{ + NodeFilterMapKeyAll: nodes, + } if len(filters) == 0 || len(filters[0]) == 0 { log.Warnln("No node filter specified") - return nodes, nil + return result, nil } // map roles to subsets @@ -64,21 +75,21 @@ func FilterNodes(nodes []*k3d.Node, filters []string) ([]*k3d.Node, error) { for _, filter := range filters { // match regex with capturing groups - match := filterRegexp.FindStringSubmatch(filter) + match := NodeFilterRegexp.FindStringSubmatch(filter) if len(match) == 0 { return nil, fmt.Errorf("Failed to parse node filters: invalid format or empty subset in '%s'", filter) } // map capturing group names to submatches - submatches := MapSubexpNames(filterRegexp.SubexpNames(), match) + submatches := 
MapSubexpNames(NodeFilterRegexp.SubexpNames(), match) // if one of the filters is 'all', we only return this and drop all others if submatches["group"] == "all" { if len(filters) > 1 { log.Warnf("Node filter 'all' set, but more were specified in '%+v'", filters) } - return nodes, nil + return result, nil } // Choose the group of nodes to operate on From d15ed268751be5b6899b3eebf3f017689fa7fac9 Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Wed, 30 Jun 2021 08:29:13 +0200 Subject: [PATCH 27/46] changes when creating clusters + new nodefilter syntax - generate node names when transforming from simple to cluster config - ClusterCreate(clusterconfig) should have a ready made config and not generate variables - ClusterCreate() only prep LB if not already present (to be removed) - cluster struct: serverloadbalancer is now of type LoadBalancer (Node + Config) - use new nodefilter syntax with 'id:index:suffix' instead of 'id[index]' everywhere - use suffix when creating the LB --- pkg/client/cluster.go | 54 ++++++------ pkg/client/loadbalancer.go | 31 ++++--- pkg/client/node.go | 2 +- pkg/config/config_test.go | 6 +- .../test_assets/config_test_simple.yaml | 6 +- .../config_test_simple_invalid_servers.yaml | 6 +- pkg/config/transform.go | 83 +++++++++++++++---- pkg/types/loadbalancer.go | 5 +- pkg/types/types.go | 2 +- pkg/util/filter.go | 56 +++++++++++-- tests/assets/config_test_simple.yaml | 6 +- ...config_test_simple_migration_v1alpha2.yaml | 2 +- ...config_test_simple_migration_v1alpha3.yaml | 4 +- tests/test_basic.sh | 2 +- tests/test_config_with_overrides.sh | 2 +- 15 files changed, 182 insertions(+), 85 deletions(-) diff --git a/pkg/client/cluster.go b/pkg/client/cluster.go index 27225ed9..6ebb82b8 100644 --- a/pkg/client/cluster.go +++ b/pkg/client/cluster.go @@ -372,12 +372,12 @@ ClusterCreatOpts: clusterCreateOpts.GlobalLabels[k3d.LabelClusterName] = cluster.Name // agent defaults (per cluster) - // connection url is always the name of the first server node 
(index 0) - connectionURL := fmt.Sprintf("https://%s:%s", generateNodeName(cluster.Name, k3d.ServerRole, 0), k3d.DefaultAPIPort) + // connection url is always the name of the first server node (index 0) // TODO: change this to the server loadbalancer + connectionURL := fmt.Sprintf("https://%s:%s", GenerateNodeName(cluster.Name, k3d.ServerRole, 0), k3d.DefaultAPIPort) clusterCreateOpts.GlobalLabels[k3d.LabelClusterURL] = connectionURL clusterCreateOpts.GlobalEnv = append(clusterCreateOpts.GlobalEnv, fmt.Sprintf("K3S_TOKEN=%s", cluster.Token)) - nodeSetup := func(node *k3d.Node, suffix int) error { + nodeSetup := func(node *k3d.Node) error { // cluster specific settings if node.RuntimeLabels == nil { node.RuntimeLabels = make(map[string]string) // TODO: maybe create an init function? @@ -417,7 +417,6 @@ ClusterCreatOpts: node.Env = append(node.Env, fmt.Sprintf("K3S_URL=%s", connectionURL)) } - node.Name = generateNodeName(cluster.Name, node.Role, suffix) node.Networks = []string{cluster.Network.Name} node.Restart = true node.GPURequest = clusterCreateOpts.GPURequest @@ -437,8 +436,6 @@ ClusterCreatOpts: // used for node suffices serverCount := 0 - agentCount := 0 - suffix := 0 // create init node first if cluster.InitNode != nil { @@ -457,7 +454,7 @@ ClusterCreatOpts: cluster.InitNode.Ports[k3d.DefaultAPIPort] = []nat.PortBinding{cluster.KubeAPI.Binding} } - if err := nodeSetup(cluster.InitNode, serverCount); err != nil { + if err := nodeSetup(cluster.InitNode); err != nil { return err } serverCount++ @@ -481,17 +478,11 @@ ClusterCreatOpts: time.Sleep(1 * time.Second) // FIXME: arbitrary wait for one second to avoid race conditions of servers registering - // name suffix - suffix = serverCount serverCount++ - } else if node.Role == k3d.AgentRole { - // name suffix - suffix = agentCount - agentCount++ } if node.Role == k3d.ServerRole || node.Role == k3d.AgentRole { - if err := nodeSetup(node, suffix); err != nil { + if err := nodeSetup(node); err != nil { return err } 
} @@ -499,7 +490,7 @@ ClusterCreatOpts: // WARN, if there are exactly two server nodes: that means we're using etcd, but don't have fault tolerance if serverCount == 2 { - log.Warnln("You're creating 2 server nodes: Please consider creating at least 3 to achieve quorum & fault tolerance") + log.Warnln("You're creating 2 server nodes: Please consider creating at least 3 to achieve etcd quorum & fault tolerance") } /* @@ -507,19 +498,26 @@ ClusterCreatOpts: */ // *** ServerLoadBalancer *** if !clusterCreateOpts.DisableLoadBalancer { - lbNode, err := LoadbalancerPrepare(ctx, runtime, cluster, &k3d.LoadbalancerCreateOpts{Labels: clusterCreateOpts.GlobalLabels}) - if err != nil { - return err + if cluster.ServerLoadBalancer == nil { + lbNode, err := LoadbalancerPrepare(ctx, runtime, cluster, &k3d.LoadbalancerCreateOpts{Labels: clusterCreateOpts.GlobalLabels}) + if err != nil { + return err + } + cluster.Nodes = append(cluster.Nodes, lbNode) // append lbNode to list of cluster nodes, so it will be considered during rollback } - cluster.Nodes = append(cluster.Nodes, lbNode) // append lbNode to list of cluster nodes, so it will be considered during rollback - lbConfig, err := LoadbalancerGenerateConfig(cluster) - if err != nil { - return fmt.Errorf("error generating loadbalancer config: %v", err) + if len(cluster.ServerLoadBalancer.Config.Ports) == 0 { + lbConfig, err := LoadbalancerGenerateConfig(cluster) + if err != nil { + return fmt.Errorf("error generating loadbalancer config: %v", err) + } + cluster.ServerLoadBalancer.Config = lbConfig } + cluster.ServerLoadBalancer.Node.RuntimeLabels = clusterCreateOpts.GlobalLabels + // prepare to write config to lb container - configyaml, err := yaml.Marshal(lbConfig) + configyaml, err := yaml.Marshal(cluster.ServerLoadBalancer.Config) if err != nil { return err } @@ -534,13 +532,13 @@ ClusterCreatOpts: }, } - lbNode.HookActions = append(lbNode.HookActions, writeLbConfigAction) + cluster.ServerLoadBalancer.Node.HookActions = 
append(cluster.ServerLoadBalancer.Node.HookActions, writeLbConfigAction) - log.Infof("Creating LoadBalancer '%s'", lbNode.Name) - if err := NodeCreate(ctx, runtime, lbNode, k3d.NodeCreateOpts{}); err != nil { + log.Infof("Creating LoadBalancer '%s'", cluster.ServerLoadBalancer.Node.Name) + if err := NodeCreate(ctx, runtime, cluster.ServerLoadBalancer.Node, k3d.NodeCreateOpts{}); err != nil { return fmt.Errorf("error creating loadbalancer: %v", err) } - log.Debugf("Created loadbalancer '%s'", lbNode.Name) + log.Debugf("Created loadbalancer '%s'", cluster.ServerLoadBalancer.Node.Name) return err } @@ -795,7 +793,7 @@ func GenerateClusterToken() string { return util.GenerateRandomString(20) } -func generateNodeName(cluster string, role k3d.Role, suffix int) string { +func GenerateNodeName(cluster string, role k3d.Role, suffix int) string { return fmt.Sprintf("%s-%s-%s-%d", k3d.DefaultObjectNamePrefix, cluster, role, suffix) } diff --git a/pkg/client/loadbalancer.go b/pkg/client/loadbalancer.go index 84843c0a..dc37d5a6 100644 --- a/pkg/client/loadbalancer.go +++ b/pkg/client/loadbalancer.go @@ -77,18 +77,18 @@ func UpdateLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, clu } log.Debugf("Writing lb config:\n%s", string(newLbConfigYaml)) startTime := time.Now().Truncate(time.Second).UTC() - if err := runtime.WriteToNode(ctx, newLbConfigYaml, k3d.DefaultLoadbalancerConfigPath, 0744, cluster.ServerLoadBalancer); err != nil { + if err := runtime.WriteToNode(ctx, newLbConfigYaml, k3d.DefaultLoadbalancerConfigPath, 0744, cluster.ServerLoadBalancer.Node); err != nil { return fmt.Errorf("error writing new loadbalancer config to container: %w", err) } successCtx, successCtxCancel := context.WithDeadline(ctx, time.Now().Add(5*time.Second)) defer successCtxCancel() - err = NodeWaitForLogMessage(successCtx, runtime, cluster.ServerLoadBalancer, k3d.ReadyLogMessageByRole[k3d.LoadBalancerRole], startTime) + err = NodeWaitForLogMessage(successCtx, runtime, 
cluster.ServerLoadBalancer.Node, k3d.ReadyLogMessageByRole[k3d.LoadBalancerRole], startTime) if err != nil { if errors.Is(err, context.DeadlineExceeded) { failureCtx, failureCtxCancel := context.WithDeadline(ctx, time.Now().Add(5*time.Second)) defer failureCtxCancel() - err = NodeWaitForLogMessage(failureCtx, runtime, cluster.ServerLoadBalancer, "host not found in upstream", startTime) + err = NodeWaitForLogMessage(failureCtx, runtime, cluster.ServerLoadBalancer.Node, "host not found in upstream", startTime) if err != nil { log.Warnf("Failed to check if the loadbalancer was configured correctly or if it broke. Please check it manually or try again: %v", err) return LBConfigErrFailedTest @@ -101,7 +101,7 @@ func UpdateLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, clu return LBConfigErrFailedTest } } - log.Infof("Successfully configured loadbalancer %s!", cluster.ServerLoadBalancer.Name) + log.Infof("Successfully configured loadbalancer %s!", cluster.ServerLoadBalancer.Node.Name) time.Sleep(1 * time.Second) // waiting for a second, to avoid issues with too fast lb updates which would screw up the log waits @@ -116,7 +116,7 @@ func GetLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, cluste for _, node := range cluster.Nodes { if node.Role == types.LoadBalancerRole { var err error - cluster.ServerLoadBalancer, err = NodeGet(ctx, runtime, node) + cluster.ServerLoadBalancer.Node, err = NodeGet(ctx, runtime, node) if err != nil { return cfg, err } @@ -124,7 +124,7 @@ func GetLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, cluste } } - reader, err := runtime.ReadFromNode(ctx, types.DefaultLoadbalancerConfigPath, cluster.ServerLoadBalancer) + reader, err := runtime.ReadFromNode(ctx, types.DefaultLoadbalancerConfigPath, cluster.ServerLoadBalancer.Node) if err != nil { return cfg, err } @@ -162,31 +162,36 @@ func LoadbalancerGenerateConfig(cluster *k3d.Cluster) (k3d.LoadbalancerConfig, e 
lbConfig.Ports[fmt.Sprintf("%s.tcp", k3d.DefaultAPIPort)] = servers // generate comma-separated list of extra ports to forward // TODO: no default targets? - for exposedPort := range cluster.ServerLoadBalancer.Ports { + for exposedPort := range cluster.ServerLoadBalancer.Node.Ports { // TODO: catch duplicates here? lbConfig.Ports[fmt.Sprintf("%s.%s", exposedPort.Port(), exposedPort.Proto())] = servers } // some additional nginx settings - lbConfig.Settings.WorkerProcesses = k3d.DefaultLoadbalancerWorkerProcesses + len(cluster.ServerLoadBalancer.Ports)*len(servers) + lbConfig.Settings.WorkerProcesses = k3d.DefaultLoadbalancerWorkerProcesses + len(cluster.ServerLoadBalancer.Node.Ports)*len(servers) return lbConfig, nil } func LoadbalancerPrepare(ctx context.Context, runtime runtimes.Runtime, cluster *types.Cluster, opts *k3d.LoadbalancerCreateOpts) (*k3d.Node, error) { + labels := map[string]string{} - if cluster.ServerLoadBalancer.Ports == nil { - cluster.ServerLoadBalancer.Ports = nat.PortMap{} + if opts != nil && opts.Labels == nil && len(opts.Labels) == 0 { + labels = opts.Labels } - cluster.ServerLoadBalancer.Ports[k3d.DefaultAPIPort] = []nat.PortBinding{cluster.KubeAPI.Binding} + + if cluster.ServerLoadBalancer.Node.Ports == nil { + cluster.ServerLoadBalancer.Node.Ports = nat.PortMap{} + } + cluster.ServerLoadBalancer.Node.Ports[k3d.DefaultAPIPort] = []nat.PortBinding{cluster.KubeAPI.Binding} // Create LB as a modified node with loadbalancerRole lbNode := &k3d.Node{ Name: fmt.Sprintf("%s-%s-serverlb", k3d.DefaultObjectNamePrefix, cluster.Name), Image: k3d.GetLoadbalancerImage(), - Ports: cluster.ServerLoadBalancer.Ports, + Ports: cluster.ServerLoadBalancer.Node.Ports, Role: k3d.LoadBalancerRole, - RuntimeLabels: opts.Labels, // TODO: createLoadBalancer: add more expressive labels + RuntimeLabels: labels, // TODO: createLoadBalancer: add more expressive labels Networks: []string{cluster.Network.Name}, Restart: true, } diff --git a/pkg/client/node.go 
b/pkg/client/node.go index 97a2212e..21946798 100644 --- a/pkg/client/node.go +++ b/pkg/client/node.go @@ -684,7 +684,7 @@ func NodeEdit(ctx context.Context, runtime runtimes.Runtime, existingNode, chang if err != nil { return fmt.Errorf("error updating loadbalancer config: %w", err) } - cluster.ServerLoadBalancer = result + cluster.ServerLoadBalancer.Node = result lbConfig, err := LoadbalancerGenerateConfig(cluster) if err != nil { return fmt.Errorf("error generating loadbalancer config: %v", err) diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go index 41d76422..7b0c08da 100644 --- a/pkg/config/config_test.go +++ b/pkg/config/config_test.go @@ -81,13 +81,13 @@ func TestReadSimpleConfig(t *testing.T) { ExtraArgs: []conf.K3sArgWithNodeFilters{ { Arg: "--tls-san=127.0.0.1", - NodeFilters: []string{"server[*]"}, + NodeFilters: []string{"server:*"}, }, }, NodeLabels: []conf.LabelWithNodeFilters{ { Label: "foo=bar", - NodeFilters: []string{"server[0]", "loadbalancer"}, + NodeFilters: []string{"server:0", "loadbalancer"}, }, }, }, @@ -99,7 +99,7 @@ func TestReadSimpleConfig(t *testing.T) { Labels: []conf.LabelWithNodeFilters{ { Label: "foo=bar", - NodeFilters: []string{"server[0]", "loadbalancer"}, + NodeFilters: []string{"server:0", "loadbalancer"}, }, }, }, diff --git a/pkg/config/test_assets/config_test_simple.yaml b/pkg/config/test_assets/config_test_simple.yaml index f8f873cb..9ababa90 100644 --- a/pkg/config/test_assets/config_test_simple.yaml +++ b/pkg/config/test_assets/config_test_simple.yaml @@ -33,11 +33,11 @@ options: extraArgs: - arg: --tls-san=127.0.0.1 nodeFilters: - - "server[*]" + - server:* nodeLabels: - label: foo=bar nodeFilters: - - server[0] + - server:0 - loadbalancer kubeconfig: updateDefaultKubeconfig: true @@ -46,5 +46,5 @@ options: labels: - label: foo=bar nodeFilters: - - server[0] + - server:0 - loadbalancer diff --git a/pkg/config/test_assets/config_test_simple_invalid_servers.yaml 
b/pkg/config/test_assets/config_test_simple_invalid_servers.yaml index 7b9bc8a0..6c67602c 100644 --- a/pkg/config/test_assets/config_test_simple_invalid_servers.yaml +++ b/pkg/config/test_assets/config_test_simple_invalid_servers.yaml @@ -33,11 +33,11 @@ options: extraArgs: - arg: --tls-san=127.0.0.1 nodeFilters: - - "server[*]" + - "server:*" nodeLabels: - label: foo=bar nodeFilters: - - server[0] + - server:0 - loadbalancer kubeconfig: updateDefaultKubeconfig: true @@ -46,5 +46,5 @@ options: labels: - label: foo=bar nodeFilters: - - server[0] + - server:0 - loadbalancer diff --git a/pkg/config/transform.go b/pkg/config/transform.go index 59e674d9..0d35677b 100644 --- a/pkg/config/transform.go +++ b/pkg/config/transform.go @@ -31,6 +31,7 @@ import ( "github.com/docker/go-connections/nat" cliutil "github.com/rancher/k3d/v4/cmd/util" // TODO: move parseapiport to pkg + "github.com/rancher/k3d/v4/pkg/client" conf "github.com/rancher/k3d/v4/pkg/config/v1alpha3" "github.com/rancher/k3d/v4/pkg/runtimes" k3d "github.com/rancher/k3d/v4/pkg/types" @@ -102,8 +103,11 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim newCluster.Nodes = []*k3d.Node{} if !simpleConfig.Options.K3dOptions.DisableLoadbalancer { - newCluster.ServerLoadBalancer = &k3d.Node{ - Role: k3d.LoadBalancerRole, + newCluster.ServerLoadBalancer = k3d.NewLoadbalancer() + var err error + newCluster.ServerLoadBalancer.Node, err = client.LoadbalancerPrepare(ctx, runtime, &newCluster, nil) + if err != nil { + return nil, fmt.Errorf("error preparing the loadbalancer: %w", err) } } else { log.Debugln("Disabling the load balancer") @@ -115,6 +119,7 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim for i := 0; i < simpleConfig.Servers; i++ { serverNode := k3d.Node{ + Name: client.GenerateNodeName(newCluster.Name, k3d.ServerRole, i), Role: k3d.ServerRole, Image: simpleConfig.Image, ServerOpts: k3d.ServerOpts{}, @@ -132,6 +137,7 @@ func 
TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim for i := 0; i < simpleConfig.Agents; i++ { agentNode := k3d.Node{ + Name: client.GenerateNodeName(newCluster.Name, k3d.AgentRole, i), Role: k3d.AgentRole, Image: simpleConfig.Image, Memory: simpleConfig.Options.Runtime.AgentsMemory, @@ -148,7 +154,7 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim nodeList := newCluster.Nodes if !simpleConfig.Options.K3dOptions.DisableLoadbalancer { nodeCount++ - nodeList = append(nodeList, newCluster.ServerLoadBalancer) + nodeList = append(nodeList, newCluster.ServerLoadBalancer.Node) } for _, volumeWithNodeFilters := range simpleConfig.Volumes { nodes, err := util.FilterNodes(nodeList, volumeWithNodeFilters.NodeFilters) @@ -167,27 +173,35 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim return nil, fmt.Errorf("Portmapping '%s' lacks a node filter, but there's more than one node", portWithNodeFilters.Port) } - nodes, err := util.FilterNodes(nodeList, portWithNodeFilters.NodeFilters) + x, err := util.FilterNodesWithSuffix(nodeList, portWithNodeFilters.NodeFilters) if err != nil { return nil, err } - for _, node := range nodes { + for suffix, nodes := range x { portmappings, err := nat.ParsePortSpec(portWithNodeFilters.Port) if err != nil { - return nil, fmt.Errorf("Failed to parse port spec '%s': %+v", portWithNodeFilters.Port, err) - } - if node.Ports == nil { - node.Ports = nat.PortMap{} + return nil, fmt.Errorf("error parsing port spec '%s': %+v", portWithNodeFilters.Port, err) } - for _, pm := range portmappings { - if _, exists := node.Ports[pm.Port]; exists { - node.Ports[pm.Port] = append(node.Ports[pm.Port], pm.Binding) - } else { - node.Ports[pm.Port] = []nat.PortBinding{pm.Binding} + if suffix == "proxy" || suffix == util.NodeFilterSuffixNone { // proxy is the default suffix for port mappings + if newCluster.ServerLoadBalancer == nil { + return nil, fmt.Errorf("port-mapping of 
type 'proxy' specified, but loadbalancer is disabled") + } + if err := addPortMappings(newCluster.ServerLoadBalancer.Node, portmappings); err != nil { + return nil, err + } + for _, pm := range portmappings { + loadbalancerAddPortConfigs(newCluster.ServerLoadBalancer, pm, nodes) + } + } else if suffix == "direct" { + for _, node := range nodes { + if err := addPortMappings(node, portmappings); err != nil { + return nil, err + } } } } + } // -> K3S NODE LABELS @@ -358,3 +372,44 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim return clusterConfig, nil } + +func addPortMappings(node *k3d.Node, portmappings []nat.PortMapping) error { + + if node.Ports == nil { + node.Ports = nat.PortMap{} + } + for _, pm := range portmappings { + if _, exists := node.Ports[pm.Port]; exists { + node.Ports[pm.Port] = append(node.Ports[pm.Port], pm.Binding) + } else { + node.Ports[pm.Port] = []nat.PortBinding{pm.Binding} + } + } + return nil +} + +func loadbalancerAddPortConfigs(loadbalancer *k3d.Loadbalancer, pm nat.PortMapping, nodes []*k3d.Node) error { + portconfig := fmt.Sprintf("%s.%s", pm.Port.Port(), pm.Port.Proto()) + nodenames := []string{} + for _, node := range nodes { + nodenames = append(nodenames, node.Name) + } + + // entry for that port doesn't exist yet, so we simply create it with the list of node names + if _, ok := loadbalancer.Config.Ports[portconfig]; !ok { + loadbalancer.Config.Ports[portconfig] = nodenames + return nil + } + +nodenameLoop: + for _, nodename := range nodenames { + for _, existingNames := range loadbalancer.Config.Ports[portconfig] { + if nodename == existingNames { + continue nodenameLoop + } + loadbalancer.Config.Ports[portconfig] = append(loadbalancer.Config.Ports[portconfig], nodename) + } + } + + return nil +} diff --git a/pkg/types/loadbalancer.go b/pkg/types/loadbalancer.go index 4145440c..158fe61b 100644 --- a/pkg/types/loadbalancer.go +++ b/pkg/types/loadbalancer.go @@ -34,16 +34,17 @@ package types */ 
type Loadbalancer struct { - Node Node `mapstructure:",squash" yaml:",inline"` // the underlying node + Node *Node `mapstructure:",squash" yaml:",inline"` // the underlying node Config LoadbalancerConfig `mapstructure:"config" yaml:"config"` // its configuration } func NewLoadbalancer() *Loadbalancer { return &Loadbalancer{ - Node: Node{ + Node: &Node{ Role: LoadBalancerRole, Image: GetLoadbalancerImage(), }, + Config: LoadbalancerConfig{Ports: map[string][]string{}}, } } diff --git a/pkg/types/types.go b/pkg/types/types.go index 8e2f946c..0a33300d 100644 --- a/pkg/types/types.go +++ b/pkg/types/types.go @@ -262,7 +262,7 @@ type Cluster struct { InitNode *Node // init server node ExternalDatastore *ExternalDatastore `yaml:"externalDatastore,omitempty" json:"externalDatastore,omitempty"` KubeAPI *ExposureOpts `yaml:"kubeAPI" json:"kubeAPI,omitempty"` - ServerLoadBalancer *Node `yaml:"serverLoadbalancer,omitempty" json:"serverLoadBalancer,omitempty"` + ServerLoadBalancer *Loadbalancer `yaml:"serverLoadbalancer,omitempty" json:"serverLoadBalancer,omitempty"` ImageVolume string `yaml:"imageVolume" json:"imageVolume,omitempty"` } diff --git a/pkg/util/filter.go b/pkg/util/filter.go index d9f80a7a..3ab76ea8 100644 --- a/pkg/util/filter.go +++ b/pkg/util/filter.go @@ -31,26 +31,65 @@ import ( log "github.com/sirupsen/logrus" ) -type NodeFilterSuffix string - const ( - NodeFilterSuffixNone NodeFilterSuffix = "none" - NodeFilterMapKeyAll = "all" + NodeFilterSuffixNone = "nosuffix" + NodeFilterMapKeyAll = "all" ) // Regexp pattern to match node filters var NodeFilterRegexp = regexp.MustCompile(`^(?Pserver|servers|agent|agents|loadbalancer|all)(?P:(?P(?P(\d+,?)+)|(?P\d*-\d*)|(?P\*)))?(?P:[[:alpha:]]+)?$`) -// FilterNodes takes a string filter to return a filtered list of nodes -func FilterNodes(nodes []*k3d.Node, filters []string) (map[string][]*k3d.Node, error) { +// FilterNodesBySuffix properly interprets NodeFilters with suffix +func FilterNodesWithSuffix(nodes 
[]*k3d.Node, nodefilters []string) (map[string][]*k3d.Node, error) { + if len(nodefilters) == 0 || len(nodefilters[0]) == 0 { + return nil, fmt.Errorf("No nodefilters specified") + } result := map[string][]*k3d.Node{ NodeFilterMapKeyAll: nodes, } + for _, nf := range nodefilters { + suffix := NodeFilterSuffixNone + + // match regex with capturing groups + match := NodeFilterRegexp.FindStringSubmatch(nf) + + if len(match) == 0 { + return nil, fmt.Errorf("Failed to parse node filters: invalid format or empty subset in '%s'", nf) + } + + // map capturing group names to submatches + submatches := MapSubexpNames(NodeFilterRegexp.SubexpNames(), match) + + // get suffix + if sf, ok := submatches["suffix"]; ok && sf != "" { + suffix = sf + } + + result[suffix] = make([]*k3d.Node, 0) // init map for this suffix + + filteredNodes, err := FilterNodes(nodes, []string{nf}) + if err != nil { + return nil, err + } + + log.Tracef("Adding %d nodes for suffix >%s< (filter: %s)", len(filteredNodes), suffix, nf) + + result[suffix] = filteredNodes + } + + return result, nil +} + +// FilterNodes takes a string filter to return a filtered list of nodes +func FilterNodes(nodes []*k3d.Node, filters []string) ([]*k3d.Node, error) { + + log.Tracef("Filtering %d nodes by %s", len(nodes), filters) + if len(filters) == 0 || len(filters[0]) == 0 { log.Warnln("No node filter specified") - return result, nil + return nodes, nil } // map roles to subsets @@ -58,7 +97,6 @@ func FilterNodes(nodes []*k3d.Node, filters []string) (map[string][]*k3d.Node, e agentNodes := []*k3d.Node{} var serverlb *k3d.Node for _, node := range nodes { - log.Tracef("FilterNodes (%+v): Checking node role %s", filters, node.Role) if node.Role == k3d.ServerRole { serverNodes = append(serverNodes, node) } else if node.Role == k3d.AgentRole { @@ -89,7 +127,7 @@ func FilterNodes(nodes []*k3d.Node, filters []string) (map[string][]*k3d.Node, e if len(filters) > 1 { log.Warnf("Node filter 'all' set, but more were specified in 
'%+v'", filters) } - return result, nil + return nodes, nil } // Choose the group of nodes to operate on diff --git a/tests/assets/config_test_simple.yaml b/tests/assets/config_test_simple.yaml index 0d4440fa..67c3fd6d 100755 --- a/tests/assets/config_test_simple.yaml +++ b/tests/assets/config_test_simple.yaml @@ -41,11 +41,11 @@ options: extraArgs: - arg: --tls-san=127.0.0.1 nodeFilters: - - server[*] + - server:* nodeLabels: - label: foo=bar nodeFilters: - - server[0] + - server:0 - loadbalancer kubeconfig: updateDefaultKubeconfig: true @@ -54,5 +54,5 @@ options: labels: - label: foo=bar nodeFilters: - - server[0] + - server:0 - loadbalancer diff --git a/tests/assets/config_test_simple_migration_v1alpha2.yaml b/tests/assets/config_test_simple_migration_v1alpha2.yaml index 4d9f7255..26140674 100755 --- a/tests/assets/config_test_simple_migration_v1alpha2.yaml +++ b/tests/assets/config_test_simple_migration_v1alpha2.yaml @@ -25,7 +25,7 @@ env: labels: - label: foo=bar nodeFilters: - - server[0] + - server:0 - loadbalancer registries: create: true diff --git a/tests/assets/config_test_simple_migration_v1alpha3.yaml b/tests/assets/config_test_simple_migration_v1alpha3.yaml index 98eccbde..f2203186 100755 --- a/tests/assets/config_test_simple_migration_v1alpha3.yaml +++ b/tests/assets/config_test_simple_migration_v1alpha3.yaml @@ -41,7 +41,7 @@ options: extraArgs: - arg: --tls-san=127.0.0.1 nodeFilters: - - server[*] + - server:* kubeconfig: updateDefaultKubeconfig: true switchCurrentContext: true @@ -49,5 +49,5 @@ options: labels: - label: foo=bar nodeFilters: - - server[0] + - server:0 - loadbalancer diff --git a/tests/test_basic.sh b/tests/test_basic.sh index 5a09aba7..8969c410 100755 --- a/tests/test_basic.sh +++ b/tests/test_basic.sh @@ -9,7 +9,7 @@ source "$CURR_DIR/common.sh" export CURRENT_STAGE="Test | basic" info "Creating two clusters..." 
-$EXE cluster create c1 --wait --timeout 60s --api-port 6443 --env 'TEST_VAR=user\@pass\\@server[0]' || failed "could not create cluster c1" +$EXE cluster create c1 --wait --timeout 60s --api-port 6443 --env 'TEST_VAR=user\@pass\\@server:0' || failed "could not create cluster c1" $EXE cluster create c2 --wait --timeout 60s || failed "could not create cluster c2" info "Checking that we can get both clusters..." diff --git a/tests/test_config_with_overrides.sh b/tests/test_config_with_overrides.sh index 6b705a06..8269a326 100755 --- a/tests/test_config_with_overrides.sh +++ b/tests/test_config_with_overrides.sh @@ -21,7 +21,7 @@ clustername="cfgoverridetest" highlight "[START] Config With Override $EXTRA_TITLE" info "Creating cluster $clustername..." -$EXE cluster create "$clustername" --config "$CURR_DIR/assets/config_test_simple.yaml" --servers 4 -v /tmp/test:/tmp/test@loadbalancer --registry-create=false --env "x=y@agent[1]" $EXTRA_FLAG || failed "could not create cluster $clustername $EXTRA_TITLE" +$EXE cluster create "$clustername" --config "$CURR_DIR/assets/config_test_simple.yaml" --servers 4 -v /tmp/test:/tmp/test@loadbalancer --registry-create=false --env "x=y@agent:1" $EXTRA_FLAG || failed "could not create cluster $clustername $EXTRA_TITLE" info "Sleeping for 5 seconds to give the cluster enough time to get ready..." 
sleep 5 From 5ce9f82428a4ce8259875a43dc9163450f63e8e7 Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Wed, 30 Jun 2021 09:41:57 +0200 Subject: [PATCH 28/46] remove debug logs for merged kubeconfig as they add too much noise --- pkg/client/kubeconfig.go | 7 ------- 1 file changed, 7 deletions(-) diff --git a/pkg/client/kubeconfig.go b/pkg/client/kubeconfig.go index a016f153..cd47b6ee 100644 --- a/pkg/client/kubeconfig.go +++ b/pkg/client/kubeconfig.go @@ -278,13 +278,6 @@ func KubeconfigMerge(ctx context.Context, newKubeConfig *clientcmdapi.Config, ex existingKubeConfig.CurrentContext = newKubeConfig.CurrentContext } - kubeconfigYaml, err := clientcmd.Write(*existingKubeConfig) - if err != nil { - log.Debugf("Merged Kubeconfig:\n%+v", existingKubeConfig) - } else { - log.Tracef("Merged Kubeconfig:\n%s", kubeconfigYaml) - } - return KubeconfigWrite(ctx, existingKubeConfig, outPath) } From 6c06208b8f80ccc89ae1ff5de233d91bdeee9bc5 Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Tue, 6 Jul 2021 13:10:02 +0200 Subject: [PATCH 29/46] document using new nodefilter syntax with : instead of [] for indexing --- cmd/cluster/clusterCreate.go | 12 ++++++------ cmd/debug/debug.go | 2 +- docs/usage/commands/k3d_cluster_create.md | 10 +++++----- docs/usage/configfile.md | 10 +++++----- docs/usage/guides/exposing_services.md | 4 ++-- pkg/config/v1alpha3/migrations.go | 4 ++-- pkg/config/v1alpha3/schema.json | 2 +- 7 files changed, 22 insertions(+), 22 deletions(-) diff --git a/cmd/cluster/clusterCreate.go b/cmd/cluster/clusterCreate.go index c238a736..543084d7 100644 --- a/cmd/cluster/clusterCreate.go +++ b/cmd/cluster/clusterCreate.go @@ -295,23 +295,23 @@ func NewCmdClusterCreate() *cobra.Command { cmd.Flags().String("api-port", "", "Specify the Kubernetes API server port exposed on the LoadBalancer (Format: `[HOST:]HOSTPORT`)\n - Example: `k3d cluster create --servers 3 --api-port 0.0.0.0:6550`") _ = ppViper.BindPFlag("cli.api-port", cmd.Flags().Lookup("api-port")) - 
cmd.Flags().StringArrayP("env", "e", nil, "Add environment variables to nodes (Format: `KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 -e \"HTTP_PROXY=my.proxy.com@server[0]\" -e \"SOME_KEY=SOME_VAL@server[0]\"`") + cmd.Flags().StringArrayP("env", "e", nil, "Add environment variables to nodes (Format: `KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 -e \"HTTP_PROXY=my.proxy.com@server:0\" -e \"SOME_KEY=SOME_VAL@server:0\"`") _ = ppViper.BindPFlag("cli.env", cmd.Flags().Lookup("env")) - cmd.Flags().StringArrayP("volume", "v", nil, "Mount volumes into the nodes (Format: `[SOURCE:]DEST[@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 -v /my/path@agent[0,1] -v /tmp/test:/tmp/other@server[0]`") + cmd.Flags().StringArrayP("volume", "v", nil, "Mount volumes into the nodes (Format: `[SOURCE:]DEST[@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 -v /my/path@agent[0,1] -v /tmp/test:/tmp/other@server:0`") _ = ppViper.BindPFlag("cli.volumes", cmd.Flags().Lookup("volume")) - cmd.Flags().StringArrayP("port", "p", nil, "Map ports from the node containers (via the serverlb) to the host (Format: `[HOST:][HOSTPORT:]CONTAINERPORT[/PROTOCOL][@NODEFILTER]`)\n - Example: `k3d cluster create --agents 2 -p 8080:80@agent[0] -p 8081@agent[1]`") + cmd.Flags().StringArrayP("port", "p", nil, "Map ports from the node containers (via the serverlb) to the host (Format: `[HOST:][HOSTPORT:]CONTAINERPORT[/PROTOCOL][@NODEFILTER]`)\n - Example: `k3d cluster create --agents 2 -p 8080:80@agent:0 -p 8081@agent[1]`") _ = ppViper.BindPFlag("cli.ports", cmd.Flags().Lookup("port")) - cmd.Flags().StringArrayP("k3s-node-label", "", nil, "Add label to k3s node (Format: `KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 --k3s-node-label \"my.label@agent[0,1]\" --k3s-node-label \"other.label=somevalue@server[0]\"`") + 
cmd.Flags().StringArrayP("k3s-node-label", "", nil, "Add label to k3s node (Format: `KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 --k3s-node-label \"my.label@agent[0,1]\" --k3s-node-label \"other.label=somevalue@server:0\"`") _ = ppViper.BindPFlag("cli.k3s-node-labels", cmd.Flags().Lookup("k3s-node-label")) - cmd.Flags().StringArrayP("runtime-label", "", nil, "Add label to container runtime (Format: `KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 --runtime-label \"my.label@agent[0,1]\" --runtime-label \"other.label=somevalue@server[0]\"`") + cmd.Flags().StringArrayP("runtime-label", "", nil, "Add label to container runtime (Format: `KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 --runtime-label \"my.label@agent[0,1]\" --runtime-label \"other.label=somevalue@server:0\"`") _ = ppViper.BindPFlag("cli.runtime-labels", cmd.Flags().Lookup("runtime-label")) /* k3s */ - cmd.Flags().StringArray("k3s-arg", nil, "Additional args passed to k3s command (Format: `ARG@NODEFILTER[;@NODEFILTER]`)\n - Example: `k3d cluster create --k3s-arg \"--disable=traefik@server[0]\"") + cmd.Flags().StringArray("k3s-arg", nil, "Additional args passed to k3s command (Format: `ARG@NODEFILTER[;@NODEFILTER]`)\n - Example: `k3d cluster create --k3s-arg \"--disable=traefik@server:0\"") _ = cfgViper.BindPFlag("cli.k3sargs", cmd.Flags().Lookup("k3s-arg")) /****************** diff --git a/cmd/debug/debug.go b/cmd/debug/debug.go index c524790b..19a58b6a 100644 --- a/cmd/debug/debug.go +++ b/cmd/debug/debug.go @@ -67,7 +67,7 @@ func NewCmdDebugLoadbalancer() *cobra.Command { } cmd.AddCommand(&cobra.Command{ - Use: "get-config", + Use: "get-config CLUSTERNAME", Args: cobra.ExactArgs(1), // cluster name Run: func(cmd *cobra.Command, args []string) { c, err := client.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &types.Cluster{Name: args[0]}) diff --git 
a/docs/usage/commands/k3d_cluster_create.md b/docs/usage/commands/k3d_cluster_create.md index f784462d..e0969db9 100644 --- a/docs/usage/commands/k3d_cluster_create.md +++ b/docs/usage/commands/k3d_cluster_create.md @@ -25,23 +25,23 @@ k3d cluster create NAME [flags] - Example: `k3d cluster create --servers 3 --api-port 0.0.0.0:6550` -c, --config string Path of a config file to use -e, --env KEY[=VALUE][@NODEFILTER[;NODEFILTER...]] Add environment variables to nodes (Format: KEY[=VALUE][@NODEFILTER[;NODEFILTER...]] - - Example: `k3d cluster create --agents 2 -e "HTTP_PROXY=my.proxy.com@server[0]" -e "SOME_KEY=SOME_VAL@server[0]"` + - Example: `k3d cluster create --agents 2 -e "HTTP_PROXY=my.proxy.com@server:0" -e "SOME_KEY=SOME_VAL@server:0"` --gpus string GPU devices to add to the cluster node containers ('all' to pass all GPUs) [From docker] -h, --help help for create -i, --image string Specify k3s image that you want to use for the nodes --k3s-arg ARG@NODEFILTER[;@NODEFILTER] Additional args passed to k3s command (Format: ARG@NODEFILTER[;@NODEFILTER]) - - Example: `k3d cluster create --k3s-arg "--disable=traefik@server[0]" + - Example: `k3d cluster create --k3s-arg "--disable=traefik@server:0" --kubeconfig-switch-context Directly switch the default kubeconfig's current-context to the new cluster's context (requires --kubeconfig-update-default) (default true) --kubeconfig-update-default Directly update the default kubeconfig with the new cluster's context (default true) -l, --label KEY[=VALUE][@NODEFILTER[;NODEFILTER...]] Add label to node container (Format: KEY[=VALUE][@NODEFILTER[;NODEFILTER...]] - - Example: `k3d cluster create --agents 2 -l "my.label@agent[0,1]" -l "other.label=somevalue@server[0]"` + - Example: `k3d cluster create --agents 2 -l "my.label@agent[0,1]" -l "other.label=somevalue@server:0"` --network string Join an existing network --no-hostip Disable the automatic injection of the Host IP as 'host.k3d.internal' into the containers and CoreDNS 
--no-image-volume Disable the creation of a volume for importing images --no-lb Disable the creation of a LoadBalancer in front of the server nodes --no-rollback Disable the automatic rollback actions, if anything goes wrong -p, --port [HOST:][HOSTPORT:]CONTAINERPORT[/PROTOCOL][@NODEFILTER] Map ports from the node containers to the host (Format: [HOST:][HOSTPORT:]CONTAINERPORT[/PROTOCOL][@NODEFILTER]) - - Example: `k3d cluster create --agents 2 -p 8080:80@agent[0] -p 8081@agent[1]` + - Example: `k3d cluster create --agents 2 -p 8080:80@agent:0 -p 8081@agent[1]` --registry-config string Specify path to an extra registries.yaml file --registry-create Create a k3d-managed registry and connect it to the cluster --registry-use stringArray Connect to one or more k3d-managed registries running locally @@ -51,7 +51,7 @@ k3d cluster create NAME [flags] --timeout duration Rollback changes if cluster couldn't be created in specified duration. --token string Specify a cluster token. By default, we generate one. -v, --volume [SOURCE:]DEST[@NODEFILTER[;NODEFILTER...]] Mount volumes into the nodes (Format: [SOURCE:]DEST[@NODEFILTER[;NODEFILTER...]] - - Example: `k3d cluster create --agents 2 -v /my/path@agent[0,1] -v /tmp/test:/tmp/other@server[0]` + - Example: `k3d cluster create --agents 2 -v /my/path@agent[0,1] -v /tmp/test:/tmp/other@server:0` --wait Wait for the server(s) to be ready before returning. Use '--timeout DURATION' to not wait forever. 
(default true) ``` diff --git a/docs/usage/configfile.md b/docs/usage/configfile.md index 97fa45c0..4f5178fd 100644 --- a/docs/usage/configfile.md +++ b/docs/usage/configfile.md @@ -19,7 +19,7 @@ Using a config file is as easy as putting it in a well-known place in your file - All options in config file: `k3d cluster create --config /home/me/my-awesome-config.yaml` (must be `.yaml`/`.yml`) - With CLI override (name): `k3d cluster create somename --config /home/me/my-awesome-config.yaml` -- With CLI override (extra volume): `k3d cluster create --config /home/me/my-awesome-config.yaml --volume '/some/path:/some:path@server[0]'` +- With CLI override (extra volume): `k3d cluster create --config /home/me/my-awesome-config.yaml --volume '/some/path:/some:path@server:0'` ## Required Fields @@ -64,9 +64,9 @@ image: rancher/k3s:v1.20.4-k3s1 # same as `--image rancher/k3s:v1.20.4-k3s1` network: my-custom-net # same as `--network my-custom-net` token: superSecretToken # same as `--token superSecretToken` volumes: # repeatable flags are represented as YAML lists - - volume: /my/host/path:/path/in/node # same as `--volume '/my/host/path:/path/in/node@server[0];agent[*]'` + - volume: /my/host/path:/path/in/node # same as `--volume '/my/host/path:/path/in/node@server:0;agent[*]'` nodeFilters: - - server[0] + - server:0 - agent[*] ports: - port: 8080:80 # same as `--port '8080:80@loadbalancer'` @@ -77,9 +77,9 @@ labels: nodeFilters: - agent[1] env: - - envVar: bar=baz # same as `--env 'bar=baz@server[0]'` + - envVar: bar=baz # same as `--env 'bar=baz@server:0'` nodeFilters: - - server[0] + - server:0 registries: # define how registries should be created or used create: true # creates a default registry to be used with the cluster; same as `--registry-create` use: diff --git a/docs/usage/guides/exposing_services.md b/docs/usage/guides/exposing_services.md index 07b4cc7b..33b0d379 100644 --- a/docs/usage/guides/exposing_services.md +++ b/docs/usage/guides/exposing_services.md @@ 
-62,10 +62,10 @@ Therefore, we have to create the cluster in a way, that the internal port 80 (wh 1. Create a cluster, mapping the port `30080` from `agent-0` to `localhost:8082` - `#!bash k3d cluster create mycluster -p "8082:30080@agent[0]" --agents 2` + `#!bash k3d cluster create mycluster -p "8082:30080@agent:0" --agents 2` - **Note 1**: Kubernetes' default NodePort range is [`30000-32767`](https://kubernetes.io/docs/concepts/services-networking/service/#nodeport) - - **Note 2**: You may as well expose the whole NodePort range from the very beginning, e.g. via `k3d cluster create mycluster --agents 3 -p "30000-32767:30000-32767@server[0]"` (See [this video from @portainer](https://www.youtube.com/watch?v=5HaU6338lAk)) + - **Note 2**: You may as well expose the whole NodePort range from the very beginning, e.g. via `k3d cluster create mycluster --agents 3 -p "30000-32767:30000-32767@server:0"` (See [this video from @portainer](https://www.youtube.com/watch?v=5HaU6338lAk)) - **Warning**: Docker creates iptable entries and a new proxy process per port-mapping, so this may take a very long time or even freeze your system! ... (Steps 2 and 3 like above) ... 
diff --git a/pkg/config/v1alpha3/migrations.go b/pkg/config/v1alpha3/migrations.go index fd1bc6dc..b1068de3 100644 --- a/pkg/config/v1alpha3/migrations.go +++ b/pkg/config/v1alpha3/migrations.go @@ -64,7 +64,7 @@ func MigrateV1Alpha2(input configtypes.Config) (configtypes.Config, error) { cfg.Options.K3sOptions.ExtraArgs = append(cfg.Options.K3sOptions.ExtraArgs, K3sArgWithNodeFilters{ Arg: arg, NodeFilters: []string{ - "server[*]", + "server:*", }, }) } @@ -73,7 +73,7 @@ func MigrateV1Alpha2(input configtypes.Config) (configtypes.Config, error) { cfg.Options.K3sOptions.ExtraArgs = append(cfg.Options.K3sOptions.ExtraArgs, K3sArgWithNodeFilters{ Arg: arg, NodeFilters: []string{ - "agent[*]", + "agent:*", }, }) } diff --git a/pkg/config/v1alpha3/schema.json b/pkg/config/v1alpha3/schema.json index 1deb0060..f04bf00e 100644 --- a/pkg/config/v1alpha3/schema.json +++ b/pkg/config/v1alpha3/schema.json @@ -257,7 +257,7 @@ "examples": [ "loadbalancer", "server[*]", - "server[0]", + "server:0", "agent[1]", "all" ] From 877849b2da9ec7182aca4af49d00866eef08be3d Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Tue, 6 Jul 2021 13:11:48 +0200 Subject: [PATCH 30/46] tests/e2e: add test for loadbalancer --- tests/test_loadbalancer.sh | 46 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) create mode 100755 tests/test_loadbalancer.sh diff --git a/tests/test_loadbalancer.sh b/tests/test_loadbalancer.sh new file mode 100755 index 00000000..9fbde168 --- /dev/null +++ b/tests/test_loadbalancer.sh @@ -0,0 +1,46 @@ +#!/bin/bash + +CURR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +[ -d "$CURR_DIR" ] || { echo "FATAL: no current dir (maybe running in zsh?)"; exit 1; } + +# shellcheck source=./common.sh +source "$CURR_DIR/common.sh" + +export CURRENT_STAGE="Test | Loadbalancer" + +highlight "[START] LoadbalancerTest $EXTRA_TITLE" + +function check_container_port() { + # $1 = container name + # $2 = wanted port + exists=$(docker inspect "$1" 
--format '{{ range $k, $_ := .NetworkSettings.Ports }}{{ if eq $k "'"$2"'" }}true{{ end }}{{ end }}') + if [[ $exists == "true" ]]; then + return 0 + else + docker inspect "$1" --format '{{ range $k, $_ := .NetworkSettings.Ports }}{{ printf "%s\n" $k }}{{ end }}' + return 1 + fi +} + +clustername="lbtest" + +info "Creating cluster $clustername..." +$EXE cluster create $clustername --timeout 360s --agents 1 -p 8080:80@server:0:proxy -p 1234:4321/tcp@agent:0:direct || failed "could not create cluster $clustername" + +info "Checking we have access to the cluster..." +check_clusters "$clustername" || failed "error checking cluster" + +info "Checking Container Ports..." +check_container_port k3d-$clustername-serverlb "6443/tcp" || failed "6443/tcp not on serverlb" +check_container_port k3d-$clustername-serverlb "80/tcp" || failed "80/tcp not on serverlb" +check_container_port k3d-$clustername-agent-0 "4321/tcp" || failed "4321/tcp not on agent-0" + +info "Checking Loadbalancer Config..." +$EXE debug loadbalancer get-config $clustername | grep -A1 "80.tcp" | grep "k3d-$clustername-server-0" || failed "port 80.tcp not configured for server-0" + +info "Deleting clusters..." 
+$EXE cluster delete $clustername || failed "could not delete the cluster $clustername" + +exit 0 + + From 31a1ac1d707fafbc31dc1fe19d63f01e53fb60e9 Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Tue, 6 Jul 2021 13:13:18 +0200 Subject: [PATCH 31/46] client/cluster:make injecting the coredns entry for host.k3d.internal more robust --- pkg/client/cluster.go | 29 +++++++++++++++++++++++++---- 1 file changed, 25 insertions(+), 4 deletions(-) diff --git a/pkg/client/cluster.go b/pkg/client/cluster.go index 6ebb82b8..b387d722 100644 --- a/pkg/client/cluster.go +++ b/pkg/client/cluster.go @@ -26,6 +26,7 @@ import ( _ "embed" "errors" "fmt" + "io/ioutil" "sort" "strconv" "time" @@ -511,7 +512,7 @@ ClusterCreatOpts: if err != nil { return fmt.Errorf("error generating loadbalancer config: %v", err) } - cluster.ServerLoadBalancer.Config = lbConfig + cluster.ServerLoadBalancer.Config = &lbConfig } cluster.ServerLoadBalancer.Node.RuntimeLabels = clusterCreateOpts.GlobalLabels @@ -956,9 +957,29 @@ func prepInjectHostIP(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.C hostRecordSuccessMessage += fmt.Sprintf("Successfully added host record to /etc/hosts in %d/%d nodes", (len(cluster.Nodes) - etcHostsFailureCount), len(cluster.Nodes)) } - patchCmd := `test=$(kubectl get cm coredns -n kube-system --template='{{.data.NodeHosts}}' | sed -n -E -e '/[0-9\.]{4,12}\s+host\.k3d\.internal$/!p' -e '$a` + hostsEntry + `' | tr '\n' '^' | busybox xargs -0 printf '{"data": {"NodeHosts":"%s"}}'| sed -E 's%\^%\\n%g') && kubectl patch cm coredns -n kube-system -p="$test"` - if err = runtime.ExecInNode(ctx, cluster.Nodes[0], []string{"sh", "-c", patchCmd}); err != nil { - log.Warnf("Failed to patch CoreDNS ConfigMap to include entry '%s': %+v", hostsEntry, err) + patchCmd := `patch=$(kubectl get cm coredns -n kube-system --template='{{.data.NodeHosts}}' | sed -n -E -e '/[0-9\.]{4,12}\s+host\.k3d\.internal$/!p' -e '$a` + hostsEntry + `' | tr '\n' '^' | busybox xargs -0 printf '{"data": 
{"NodeHosts":"%s"}}'| sed -E 's%\^%\\n%g') && kubectl patch cm coredns -n kube-system -p="$patch"` + successInjectCoreDNSEntry := false + for _, node := range cluster.Nodes { + + if node.Role == k3d.AgentRole || node.Role == k3d.ServerRole { + logreader, err := runtime.ExecInNodeGetLogs(ctx, node, []string{"sh", "-c", patchCmd}) + if err == nil { + successInjectCoreDNSEntry = true + break + } else { + msg := fmt.Sprintf("error patching the CoreDNS ConfigMap to include entry '%s': %+v", hostsEntry, err) + readlogs, err := ioutil.ReadAll(logreader) + if err != nil { + log.Debugf("error reading the logs from failed CoreDNS patch exec process in node %s: %v", node.Name, err) + } else { + msg += fmt.Sprintf("\nLogs: %s", string(readlogs)) + } + log.Debugln(msg) + } + } + } + if successInjectCoreDNSEntry == false { + log.Warnf("Failed to patch CoreDNS ConfigMap to include entry '%s' (see debug logs)", hostsEntry) } else { hostRecordSuccessMessage += " and to the CoreDNS ConfigMap" } From 6e8b27f99f08a44ac145cad640e6113588db9f0b Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Tue, 6 Jul 2021 13:14:05 +0200 Subject: [PATCH 32/46] fix usage of the new loadbalancer type and ordering when cluster is created --- pkg/client/loadbalancer.go | 9 ++++++++- pkg/config/transform.go | 16 ++++++++++------ pkg/runtimes/docker/translate.go | 3 +-- pkg/types/loadbalancer.go | 11 ++++++++--- pkg/util/filter.go | 2 +- 5 files changed, 28 insertions(+), 13 deletions(-) diff --git a/pkg/client/loadbalancer.go b/pkg/client/loadbalancer.go index dc37d5a6..fe1989b4 100644 --- a/pkg/client/loadbalancer.go +++ b/pkg/client/loadbalancer.go @@ -112,7 +112,8 @@ func GetLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, cluste var cfg k3d.LoadbalancerConfig - if cluster.ServerLoadBalancer == nil { + if cluster.ServerLoadBalancer == nil || cluster.ServerLoadBalancer.Node == nil { + cluster.ServerLoadBalancer = &k3d.Loadbalancer{} for _, node := range cluster.Nodes { if node.Role == 
types.LoadBalancerRole { var err error @@ -185,6 +186,12 @@ func LoadbalancerPrepare(ctx context.Context, runtime runtimes.Runtime, cluster } cluster.ServerLoadBalancer.Node.Ports[k3d.DefaultAPIPort] = []nat.PortBinding{cluster.KubeAPI.Binding} + if cluster.ServerLoadBalancer.Config == nil { + cluster.ServerLoadBalancer.Config = &k3d.LoadbalancerConfig{ + Ports: map[string][]string{}, + } + } + // Create LB as a modified node with loadbalancerRole lbNode := &k3d.Node{ Name: fmt.Sprintf("%s-%s-serverlb", k3d.DefaultObjectNamePrefix, cluster.Name), diff --git a/pkg/config/transform.go b/pkg/config/transform.go index 0d35677b..417869c8 100644 --- a/pkg/config/transform.go +++ b/pkg/config/transform.go @@ -61,6 +61,9 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim if simpleConfig.Network != "" { clusterNetwork.Name = simpleConfig.Network clusterNetwork.External = true + } else { + clusterNetwork.Name = fmt.Sprintf("%s-%s", k3d.DefaultObjectNamePrefix, simpleConfig.Name) + clusterNetwork.External = false } if simpleConfig.Subnet != "" { @@ -109,6 +112,7 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim if err != nil { return nil, fmt.Errorf("error preparing the loadbalancer: %w", err) } + newCluster.Nodes = append(newCluster.Nodes, newCluster.ServerLoadBalancer.Node) } else { log.Debugln("Disabling the load balancer") } @@ -133,6 +137,8 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim } newCluster.Nodes = append(newCluster.Nodes, &serverNode) + + newCluster.ServerLoadBalancer.Config.Ports[fmt.Sprintf("%s.tcp", k3d.DefaultAPIPort)] = append(newCluster.ServerLoadBalancer.Config.Ports[fmt.Sprintf("%s.tcp", k3d.DefaultAPIPort)], serverNode.Name) } for i := 0; i < simpleConfig.Agents; i++ { @@ -150,12 +156,8 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim ****************************/ // -> VOLUMES - nodeCount := simpleConfig.Servers + 
simpleConfig.Agents + nodeCount := len(newCluster.Nodes) nodeList := newCluster.Nodes - if !simpleConfig.Options.K3dOptions.DisableLoadbalancer { - nodeCount++ - nodeList = append(nodeList, newCluster.ServerLoadBalancer.Node) - } for _, volumeWithNodeFilters := range simpleConfig.Volumes { nodes, err := util.FilterNodes(nodeList, volumeWithNodeFilters.NodeFilters) if err != nil { @@ -191,7 +193,9 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim return nil, err } for _, pm := range portmappings { - loadbalancerAddPortConfigs(newCluster.ServerLoadBalancer, pm, nodes) + if err := loadbalancerAddPortConfigs(newCluster.ServerLoadBalancer, pm, nodes); err != nil { + return nil, err + } } } else if suffix == "direct" { for _, node := range nodes { diff --git a/pkg/runtimes/docker/translate.go b/pkg/runtimes/docker/translate.go index 564b0f6f..672061bf 100644 --- a/pkg/runtimes/docker/translate.go +++ b/pkg/runtimes/docker/translate.go @@ -145,8 +145,7 @@ func TranslateNodeToContainer(node *k3d.Node) (*NodeInDocker, error) { if len(node.Networks) > 0 { netInfo, err := GetNetwork(context.Background(), node.Networks[0]) // FIXME: only considering first network here, as that's the one k3d creates for a cluster if err != nil { - log.Warnln("Failed to get network information") - log.Warnln(err) + log.Warnf("Failed to get network information: %v", err) } else if netInfo.Driver == "host" { hostConfig.NetworkMode = "host" } diff --git a/pkg/types/loadbalancer.go b/pkg/types/loadbalancer.go index 158fe61b..caaba800 100644 --- a/pkg/types/loadbalancer.go +++ b/pkg/types/loadbalancer.go @@ -34,8 +34,8 @@ package types */ type Loadbalancer struct { - Node *Node `mapstructure:",squash" yaml:",inline"` // the underlying node - Config LoadbalancerConfig `mapstructure:"config" yaml:"config"` // its configuration + Node *Node `mapstructure:",squash" yaml:",inline"` // the underlying node + Config *LoadbalancerConfig `mapstructure:"config" yaml:"config"` 
// its configuration } func NewLoadbalancer() *Loadbalancer { @@ -44,7 +44,12 @@ func NewLoadbalancer() *Loadbalancer { Role: LoadBalancerRole, Image: GetLoadbalancerImage(), }, - Config: LoadbalancerConfig{Ports: map[string][]string{}}, + Config: &LoadbalancerConfig{ + Ports: map[string][]string{}, + Settings: LoadBalancerSettings{ + WorkerProcesses: DefaultLoadbalancerWorkerProcesses, + }, + }, } } diff --git a/pkg/util/filter.go b/pkg/util/filter.go index 3ab76ea8..7df75314 100644 --- a/pkg/util/filter.go +++ b/pkg/util/filter.go @@ -37,7 +37,7 @@ const ( ) // Regexp pattern to match node filters -var NodeFilterRegexp = regexp.MustCompile(`^(?Pserver|servers|agent|agents|loadbalancer|all)(?P:(?P(?P(\d+,?)+)|(?P\d*-\d*)|(?P\*)))?(?P:[[:alpha:]]+)?$`) +var NodeFilterRegexp = regexp.MustCompile(`^(?Pserver|servers|agent|agents|loadbalancer|all)(?P:(?P(?P(\d+,?)+)|(?P\d*-\d*)|(?P\*)))?(?P:(?P[[:alpha:]]+))?$`) // FilterNodesBySuffix properly interprets NodeFilters with suffix func FilterNodesWithSuffix(nodes []*k3d.Node, nodefilters []string) (map[string][]*k3d.Node, error) { From 28ff88a1edf2316c5b47991d9665330d3796fe5e Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Tue, 6 Jul 2021 13:26:50 +0200 Subject: [PATCH 33/46] fix missing merge conflict resolutions --- go.mod | 1 + go.sum | 1 + pkg/client/node.go | 7 ------- vendor/modules.txt | 3 +++ 4 files changed, 5 insertions(+), 7 deletions(-) diff --git a/go.mod b/go.mod index b0d20808..6893fcd8 100644 --- a/go.mod +++ b/go.mod @@ -30,6 +30,7 @@ require ( github.com/spf13/viper v1.8.1 github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonschema v1.2.0 + golang.org/x/sync v0.0.0-20210220032951-036812b2e83c golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 // indirect golang.org/x/text v0.3.6 // indirect gopkg.in/yaml.v2 v2.4.0 diff --git a/go.sum b/go.sum index 6f0cbba9..879f84b4 100644 --- a/go.sum +++ b/go.sum @@ -686,6 +686,7 @@ golang.org/x/sync 
v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20171026204733-164713f0dfce/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/pkg/client/node.go b/pkg/client/node.go index 21946798..aae7bc46 100644 --- a/pkg/client/node.go +++ b/pkg/client/node.go @@ -34,10 +34,7 @@ import ( "time" copystruct "github.com/mitchellh/copystructure" -<<<<<<< HEAD "gopkg.in/yaml.v2" -======= ->>>>>>> 9a18c3a3 ([FEATURE] add ability to add ports to an existing loadbalancer (#615)) "github.com/docker/go-connections/nat" dockerunits "github.com/docker/go-units" @@ -741,11 +738,7 @@ func NodeReplace(ctx context.Context, runtime runtimes.Runtime, old, new *k3d.No // start new node log.Infof("Starting new node %s...", new.Name) -<<<<<<< HEAD if err := NodeStart(ctx, runtime, new, k3d.NodeStartOpts{Wait: true, NodeHooks: new.HookActions}); err != nil { -======= - if err := NodeStart(ctx, runtime, new, k3d.NodeStartOpts{Wait: true}); err != nil { ->>>>>>> 9a18c3a3 ([FEATURE] add ability to add ports to an existing loadbalancer (#615)) if err := NodeDelete(ctx, runtime, new, k3d.NodeDeleteOpts{SkipLBUpdate: true}); err != nil { return fmt.Errorf("Failed to start new node. 
Also failed to rollback: %+v", err) } diff --git a/vendor/modules.txt b/vendor/modules.txt index d499b5ef..54f233ab 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -233,6 +233,9 @@ golang.org/x/net/proxy # golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602 golang.org/x/oauth2 golang.org/x/oauth2/internal +# golang.org/x/sync v0.0.0-20210220032951-036812b2e83c +## explicit +golang.org/x/sync/errgroup # golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 ## explicit golang.org/x/sys/execabs From 8da0f7d7dc6169237b604006491844f8a7ebc09b Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Tue, 6 Jul 2021 13:34:56 +0200 Subject: [PATCH 34/46] add forgotten dependency --- vendor/golang.org/x/sync/AUTHORS | 3 + vendor/golang.org/x/sync/CONTRIBUTORS | 3 + vendor/golang.org/x/sync/LICENSE | 27 ++++++++ vendor/golang.org/x/sync/PATENTS | 22 +++++++ vendor/golang.org/x/sync/errgroup/errgroup.go | 66 +++++++++++++++++++ 5 files changed, 121 insertions(+) create mode 100644 vendor/golang.org/x/sync/AUTHORS create mode 100644 vendor/golang.org/x/sync/CONTRIBUTORS create mode 100644 vendor/golang.org/x/sync/LICENSE create mode 100644 vendor/golang.org/x/sync/PATENTS create mode 100644 vendor/golang.org/x/sync/errgroup/errgroup.go diff --git a/vendor/golang.org/x/sync/AUTHORS b/vendor/golang.org/x/sync/AUTHORS new file mode 100644 index 00000000..15167cd7 --- /dev/null +++ b/vendor/golang.org/x/sync/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/sync/CONTRIBUTORS b/vendor/golang.org/x/sync/CONTRIBUTORS new file mode 100644 index 00000000..1c4577e9 --- /dev/null +++ b/vendor/golang.org/x/sync/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. 
+# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/sync/LICENSE b/vendor/golang.org/x/sync/LICENSE new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/vendor/golang.org/x/sync/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/golang.org/x/sync/PATENTS b/vendor/golang.org/x/sync/PATENTS new file mode 100644 index 00000000..73309904 --- /dev/null +++ b/vendor/golang.org/x/sync/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go new file mode 100644 index 00000000..9857fe53 --- /dev/null +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -0,0 +1,66 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package errgroup provides synchronization, error propagation, and Context +// cancelation for groups of goroutines working on subtasks of a common task. +package errgroup + +import ( + "context" + "sync" +) + +// A Group is a collection of goroutines working on subtasks that are part of +// the same overall task. +// +// A zero Group is valid and does not cancel on error. +type Group struct { + cancel func() + + wg sync.WaitGroup + + errOnce sync.Once + err error +} + +// WithContext returns a new Group and an associated Context derived from ctx. +// +// The derived Context is canceled the first time a function passed to Go +// returns a non-nil error or the first time Wait returns, whichever occurs +// first. +func WithContext(ctx context.Context) (*Group, context.Context) { + ctx, cancel := context.WithCancel(ctx) + return &Group{cancel: cancel}, ctx +} + +// Wait blocks until all function calls from the Go method have returned, then +// returns the first non-nil error (if any) from them. +func (g *Group) Wait() error { + g.wg.Wait() + if g.cancel != nil { + g.cancel() + } + return g.err +} + +// Go calls the given function in a new goroutine. +// +// The first call to return a non-nil error cancels the group; its error will be +// returned by Wait. 
+func (g *Group) Go(f func() error) { + g.wg.Add(1) + + go func() { + defer g.wg.Done() + + if err := f(); err != nil { + g.errOnce.Do(func() { + g.err = err + if g.cancel != nil { + g.cancel() + } + }) + } + }() +} From 1730ca5dfbb82c47fb5bb686bdaab0831748cdf2 Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Tue, 6 Jul 2021 14:01:47 +0200 Subject: [PATCH 35/46] fix nilpointer exception when cluster loadbalancer is not specified --- pkg/client/node.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pkg/client/node.go b/pkg/client/node.go index aae7bc46..09e83cf0 100644 --- a/pkg/client/node.go +++ b/pkg/client/node.go @@ -681,6 +681,9 @@ func NodeEdit(ctx context.Context, runtime runtimes.Runtime, existingNode, chang if err != nil { return fmt.Errorf("error updating loadbalancer config: %w", err) } + if cluster.ServerLoadBalancer == nil { + cluster.ServerLoadBalancer = k3d.NewLoadbalancer() + } cluster.ServerLoadBalancer.Node = result lbConfig, err := LoadbalancerGenerateConfig(cluster) if err != nil { From 74b93fda45800dd96bc7e8788faa48429f368f7d Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Tue, 6 Jul 2021 14:25:51 +0200 Subject: [PATCH 36/46] test/e2e: loadbalancer test case no suffix --- tests/test_loadbalancer.sh | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/tests/test_loadbalancer.sh b/tests/test_loadbalancer.sh index 9fbde168..664c386f 100755 --- a/tests/test_loadbalancer.sh +++ b/tests/test_loadbalancer.sh @@ -25,16 +25,26 @@ function check_container_port() { clustername="lbtest" info "Creating cluster $clustername..." -$EXE cluster create $clustername --timeout 360s --agents 1 -p 8080:80@server:0:proxy -p 1234:4321/tcp@agent:0:direct || failed "could not create cluster $clustername" +$EXE cluster create $clustername --timeout 360s --agents 1 -p 2222:3333@server:0 -p 8080:80@server:0:proxy -p 1234:4321/tcp@agent:0:direct || failed "could not create cluster $clustername" info "Checking we have access to the cluster..." 
check_clusters "$clustername" || failed "error checking cluster" info "Checking Container Ports..." + +info "> Checking automatic port mapping for Kube API on loadbalancer (6443)..." check_container_port k3d-$clustername-serverlb "6443/tcp" || failed "6443/tcp not on serverlb" + +info "> Checking explicit proxy port mapping of port 80 -> loadbalancer -> server-0" check_container_port k3d-$clustername-serverlb "80/tcp" || failed "80/tcp not on serverlb" + +info "> Checking explicit direct port mapping of port 4321 -> agent-0" check_container_port k3d-$clustername-agent-0 "4321/tcp" || failed "4321/tcp not on agent-0" +info "> Checking implicit proxy port mapping of port 3333 -> loadbalancer -> server-0" +check_container_port k3d-$clustername-server-0 "3333/tcp" && failed "3333/tcp on server-0 but should be on serverlb" +check_container_port k3d-$clustername-serverlb "3333/tcp" || failed "3333/tcp not on serverlb" + info "Checking Loadbalancer Config..." $EXE debug loadbalancer get-config $clustername | grep -A1 "80.tcp" | grep "k3d-$clustername-server-0" || failed "port 80.tcp not configured for server-0" From c7ace60c50a986c20329faacf5f21ada61f5c169 Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Tue, 6 Jul 2021 14:41:18 +0200 Subject: [PATCH 37/46] changelog: add some notes about the new nodefilters and port-mapping behavior --- CHANGELOG.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e86ce33f..9d8826e9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,18 @@ ## v5.0.0 +### Breaking Changes + +- new syntax for nodefilters + - dropped the usage of square brackets `[]` for indexing, as it caused problems with some shells trying to interpret them + - new syntax: `@identifier[:index][:opt]` (see ) + - example for a port-mapping: `--port 8080:80@server:0:proxy` + - identifier = `server`, index = `0`, opt = `proxy` + - `opt` is an extra optional argument used for different purposes depending on the flag + - 
currently, only the `--port` flag has `opt`s, namely `proxy` and `direct` (see other breaking change) + +- + ### Fixes - cleaned up and properly sorted the sanitization of existing resources used to create new nodes (#638) From d6bf08feed7f5d71cc00a661bd54750e7eb53bec Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Wed, 7 Jul 2021 09:18:54 +0200 Subject: [PATCH 38/46] properly handle --port 1234:4321@loadbalancer:proxy style port mappings which should default to all nodes as upstream --- CHANGELOG.md | 5 +++-- Dockerfile | 2 +- pkg/config/transform.go | 21 +++++++++++++++++-- pkg/util/filter.go | 41 +++++++++++++++++++++++++++----------- tests/test_loadbalancer.sh | 17 ++++++++++++++-- 5 files changed, 67 insertions(+), 19 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9d8826e9..473948f2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,8 +11,9 @@ - identifier = `server`, index = `0`, opt = `proxy` - `opt` is an extra optional argument used for different purposes depending on the flag - currently, only the `--port` flag has `opt`s, namely `proxy` and `direct` (see other breaking change) - -- +- port-mapping now go via the loadbalancer (serverlb) by default + - the `--port` flag has the `proxy` opt (see new nodefilter syntax above) set by default + - to leverage the old behavior of direct port-mappings, use the `direct` opt on the port flag ### Fixes diff --git a/Dockerfile b/Dockerfile index 1aa1b056..df9d5021 100644 --- a/Dockerfile +++ b/Dockerfile @@ -5,7 +5,7 @@ COPY . . 
RUN make build -e GIT_TAG_OVERRIDE=${GIT_TAG_OVERRIDE} && bin/k3d version FROM docker:20.10-dind as dind -RUN apk update && apk add bash curl sudo jq git make netcat-openbsd +RUN apk update && apk add bash curl sudo jq yq git make netcat-openbsd RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/`curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt`/bin/linux/amd64/kubectl && \ chmod +x ./kubectl && \ mv ./kubectl /usr/local/bin/kubectl diff --git a/pkg/config/transform.go b/pkg/config/transform.go index 417869c8..cc812374 100644 --- a/pkg/config/transform.go +++ b/pkg/config/transform.go @@ -175,16 +175,28 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim return nil, fmt.Errorf("Portmapping '%s' lacks a node filter, but there's more than one node", portWithNodeFilters.Port) } - x, err := util.FilterNodesWithSuffix(nodeList, portWithNodeFilters.NodeFilters) + filteredNodes, err := util.FilterNodesWithSuffix(nodeList, portWithNodeFilters.NodeFilters) if err != nil { return nil, err } - for suffix, nodes := range x { + for suffix, nodes := range filteredNodes { portmappings, err := nat.ParsePortSpec(portWithNodeFilters.Port) if err != nil { return nil, fmt.Errorf("error parsing port spec '%s': %+v", portWithNodeFilters.Port, err) } + + for _, n := range nodes { + if n.Role == k3d.LoadBalancerRole && n.Name == newCluster.ServerLoadBalancer.Node.Name { + log.Infoln("loadbalancer in filtered list for port mappings: defaulting to all servers and agents as upstream targets") + var err error + nodes, err = util.FilterNodes(newCluster.Nodes, []string{"agents:*", "servers:*"}) + if err != nil { + return nil, err + } + } + } + if suffix == "proxy" || suffix == util.NodeFilterSuffixNone { // proxy is the default suffix for port mappings if newCluster.ServerLoadBalancer == nil { return nil, fmt.Errorf("port-mapping of type 'proxy' specified, but loadbalancer is disabled") @@ -203,6 +215,8 @@ func 
TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim return nil, err } } + } else if suffix != util.NodeFilterMapKeyAll { + return nil, fmt.Errorf("error adding port mappings: unknown suffix %s", suffix) } } @@ -396,6 +410,9 @@ func loadbalancerAddPortConfigs(loadbalancer *k3d.Loadbalancer, pm nat.PortMappi portconfig := fmt.Sprintf("%s.%s", pm.Port.Port(), pm.Port.Proto()) nodenames := []string{} for _, node := range nodes { + if node.Role == k3d.LoadBalancerRole { + return fmt.Errorf("error adding port config to loadbalancer: cannot add port config referencing the loadbalancer itself (loop)") + } nodenames = append(nodenames, node.Name) } diff --git a/pkg/util/filter.go b/pkg/util/filter.go index 7df75314..7ddb04ba 100644 --- a/pkg/util/filter.go +++ b/pkg/util/filter.go @@ -36,6 +36,16 @@ const ( NodeFilterMapKeyAll = "all" ) +var ( + rolesByIdentifier = map[string]k3d.Role{ + "server": k3d.ServerRole, + "servers": k3d.ServerRole, + "agent": k3d.AgentRole, + "agents": k3d.AgentRole, + "loadbalancer": k3d.LoadBalancerRole, + } +) + // Regexp pattern to match node filters var NodeFilterRegexp = regexp.MustCompile(`^(?Pserver|servers|agent|agents|loadbalancer|all)(?P:(?P(?P(\d+,?)+)|(?P\d*-\d*)|(?P\*)))?(?P:(?P[[:alpha:]]+))?$`) @@ -74,7 +84,7 @@ func FilterNodesWithSuffix(nodes []*k3d.Node, nodefilters []string) (map[string] return nil, err } - log.Tracef("Adding %d nodes for suffix >%s< (filter: %s)", len(filteredNodes), suffix, nf) + log.Tracef("Filtered %d nodes for suffix '%s' (filter: %s)", len(filteredNodes), suffix, nf) result[suffix] = filteredNodes } @@ -132,16 +142,21 @@ func FilterNodes(nodes []*k3d.Node, filters []string) ([]*k3d.Node, error) { // Choose the group of nodes to operate on groupNodes := []*k3d.Node{} - if submatches["group"] == string(k3d.ServerRole) { - groupNodes = serverNodes - } else if submatches["group"] == string(k3d.AgentRole) { - groupNodes = agentNodes - } else if submatches["group"] == 
string(k3d.LoadBalancerRole) { - if serverlb == nil { - return nil, fmt.Errorf("Node filter '%s' targets a node that does not exist (disabled?)", filter) + if role, ok := rolesByIdentifier[submatches["group"]]; ok { + switch role { + case k3d.ServerRole: + groupNodes = serverNodes + break + case k3d.AgentRole: + groupNodes = agentNodes + break + case k3d.LoadBalancerRole: + if serverlb == nil { + return nil, fmt.Errorf("Node filter '%s' targets a node that does not exist (disabled?)", filter) + } + filteredNodes = append(filteredNodes, serverlb) + return filteredNodes, nil // early exit if filtered group is the loadbalancer } - filteredNodes = append(filteredNodes, serverlb) - return filteredNodes, nil // early exit if filtered group is the loadbalancer } /* Option 1) subset defined by list */ @@ -166,10 +181,10 @@ func FilterNodes(nodes []*k3d.Node, filters []string) ([]*k3d.Node, error) { } else if submatches["subsetRange"] != "" { /* - * subset specified by a range 'START:END', where each side is optional + * subset specified by a range 'START-END', where each side is optional */ - split := strings.Split(submatches["subsetRange"], ":") + split := strings.Split(submatches["subsetRange"], "-") if len(split) != 2 { return nil, fmt.Errorf("Failed to parse subset range in '%s'", filter) } @@ -226,6 +241,8 @@ func FilterNodes(nodes []*k3d.Node, filters []string) ([]*k3d.Node, error) { } + log.Tracef("Filtered %d nodes (filter: %s)", len(filteredNodes), filters) + return filteredNodes, nil } diff --git a/tests/test_loadbalancer.sh b/tests/test_loadbalancer.sh index 664c386f..e4f8327e 100755 --- a/tests/test_loadbalancer.sh +++ b/tests/test_loadbalancer.sh @@ -25,7 +25,12 @@ function check_container_port() { clustername="lbtest" info "Creating cluster $clustername..." 
-$EXE cluster create $clustername --timeout 360s --agents 1 -p 2222:3333@server:0 -p 8080:80@server:0:proxy -p 1234:4321/tcp@agent:0:direct || failed "could not create cluster $clustername" +$EXE cluster create $clustername --timeout 360s --agents 1 \ + -p 2222:3333@server:0 \ + -p 8080:80@server:0:proxy \ + -p 1234:4321/tcp@agent:0:direct \ + -p 4444:5555@loadbalancer:0:proxy \ + || failed "could not create cluster $clustername" info "Checking we have access to the cluster..." check_clusters "$clustername" || failed "error checking cluster" @@ -45,8 +50,16 @@ info "> Checking implicit proxy port mapping of port 3333 -> loadbalancer -> ser check_container_port k3d-$clustername-server-0 "3333/tcp" && failed "3333/tcp on server-0 but should be on serverlb" check_container_port k3d-$clustername-serverlb "3333/tcp" || failed "3333/tcp not on serverlb" +info "> Checking implicit proxy port mapping of port 5555 -> loadbalancer -> server-0 & agent-0" +check_container_port k3d-$clustername-server-0 "5555/tcp" && failed "5555/tcp on server-0 but should be on serverlb" +check_container_port k3d-$clustername-agent-0 "5555/tcp" && failed "5555/tcp on agent-0 but should be on serverlb" +check_container_port k3d-$clustername-serverlb "5555/tcp" || failed "5555/tcp not on serverlb" + info "Checking Loadbalancer Config..." 
-$EXE debug loadbalancer get-config $clustername | grep -A1 "80.tcp" | grep "k3d-$clustername-server-0" || failed "port 80.tcp not configured for server-0" +$EXE debug loadbalancer get-config $clustername | yq read - 'ports."80.tcp"' | grep "k3d-$clustername-server-0" || failed "port 80.tcp not configured for server-0" +$EXE debug loadbalancer get-config $clustername | yq read - 'ports."5555.tcp"' | grep "k3d-$clustername-server-0" || failed "port 5555.tcp not configured for server-0" +$EXE debug loadbalancer get-config $clustername | yq read - 'ports."5555.tcp"' | grep "k3d-$clustername-agent-0" || failed "port 5555.tcp not configured for agent-0" + info "Deleting clusters..." $EXE cluster delete $clustername || failed "could not delete the cluster $clustername" From f03156b16680e3f1ee3a4ff4223ee590a6d94ffd Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Wed, 7 Jul 2021 15:16:46 +0200 Subject: [PATCH 39/46] tests/e2e: check more cases for port-mappings that should go via the loadbalancer --- tests/test_loadbalancer.sh | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/tests/test_loadbalancer.sh b/tests/test_loadbalancer.sh index e4f8327e..c9d17045 100755 --- a/tests/test_loadbalancer.sh +++ b/tests/test_loadbalancer.sh @@ -30,6 +30,7 @@ $EXE cluster create $clustername --timeout 360s --agents 1 \ -p 8080:80@server:0:proxy \ -p 1234:4321/tcp@agent:0:direct \ -p 4444:5555@loadbalancer:0:proxy \ + -p 6666:7777 \ || failed "could not create cluster $clustername" info "Checking we have access to the cluster..." 
@@ -55,10 +56,18 @@ check_container_port k3d-$clustername-server-0 "5555/tcp" && failed "5555/tcp on check_container_port k3d-$clustername-agent-0 "5555/tcp" && failed "5555/tcp on agent-0 but should be on serverlb" check_container_port k3d-$clustername-serverlb "5555/tcp" || failed "5555/tcp not on serverlb" +info "> Checking implicit proxy port mapping of port 7777 -> loadbalancer -> server-0 & agent-0" +check_container_port k3d-$clustername-server-0 "7777/tcp" && failed "7777/tcp on server-0 but should be on serverlb" +check_container_port k3d-$clustername-agent-0 "7777/tcp" && failed "7777/tcp on agent-0 but should be on serverlb" +check_container_port k3d-$clustername-serverlb "7777/tcp" || failed "7777/tcp not on serverlb" + info "Checking Loadbalancer Config..." -$EXE debug loadbalancer get-config $clustername | yq read - 'ports."80.tcp"' | grep "k3d-$clustername-server-0" || failed "port 80.tcp not configured for server-0" -$EXE debug loadbalancer get-config $clustername | yq read - 'ports."5555.tcp"' | grep "k3d-$clustername-server-0" || failed "port 5555.tcp not configured for server-0" -$EXE debug loadbalancer get-config $clustername | yq read - 'ports."5555.tcp"' | grep "k3d-$clustername-agent-0" || failed "port 5555.tcp not configured for agent-0" +LOG_LEVEL=error $EXE debug loadbalancer get-config $clustername > lbconfig.yaml +yq eval '.ports."80.tcp"' lbconfig.yaml | grep -q "k3d-$clustername-server-0" || failed "port 80.tcp not configured for server-0" +yq eval '.ports."5555.tcp"' lbconfig.yaml | grep -q "k3d-$clustername-server-0" || failed "port 5555.tcp not configured for server-0" +yq eval '.ports."5555.tcp"' lbconfig.yaml | grep -q "k3d-$clustername-agent-0" || failed "port 5555.tcp not configured for agent-0" +yq eval '.ports."7777.tcp"' lbconfig.yaml | grep -q "k3d-$clustername-server-0" || failed "port 7777.tcp not configured for server-0" +yq eval '.ports."7777.tcp"' lbconfig.yaml | grep -q "k3d-$clustername-agent-0" || failed "port 
7777.tcp not configured for agent-0" info "Deleting clusters..." From b8132407ca86d3c4d6fd11671dc8544914970187 Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Wed, 7 Jul 2021 15:17:48 +0200 Subject: [PATCH 40/46] tests/e2e: quiet grep output to pollute the terminal a little less --- tests/common.sh | 6 +++--- tests/dind.sh | 2 +- tests/test_basic.sh | 2 +- tests/test_config_file.sh | 6 +++--- tests/test_config_with_overrides.sh | 8 ++++---- tests/test_node_edit.sh | 4 ++-- 6 files changed, 14 insertions(+), 14 deletions(-) diff --git a/tests/common.sh b/tests/common.sh index 4d4de061..57778213 100755 --- a/tests/common.sh +++ b/tests/common.sh @@ -117,7 +117,7 @@ check_volume_exists() { check_cluster_token_exist() { [ -n "$EXE" ] || abort "EXE is not defined" - $EXE cluster get "$1" --token | grep "TOKEN" >/dev/null 2>&1 + $EXE cluster get "$1" --token | grep -q "TOKEN" >/dev/null 2>&1 } wait_for_pod_running_by_label() { @@ -174,11 +174,11 @@ exec_in_node() { docker_assert_container_label() { # $1 = container/node name # $2 = label to assert - docker inspect --format '{{ range $k, $v := .Config.Labels }}{{ printf "%s=%s\n" $k $v }}{{ end }}' "$1" | grep -E "^$2$" + docker inspect --format '{{ range $k, $v := .Config.Labels }}{{ printf "%s=%s\n" $k $v }}{{ end }}' "$1" | grep -qE "^$2$" } k3s_assert_node_label() { # $1 = node name # $2 = label to assert - kubectl get node "$1" --output go-template='{{ range $k, $v := .metadata.labels }}{{ printf "%s=%s\n" $k $v }}{{ end }}' | grep -E "^$2$" + kubectl get node "$1" --output go-template='{{ range $k, $v := .metadata.labels }}{{ printf "%s=%s\n" $k $v }}{{ end }}' | grep -qE "^$2$" } \ No newline at end of file diff --git a/tests/dind.sh b/tests/dind.sh index edede4a7..5432ac25 100755 --- a/tests/dind.sh +++ b/tests/dind.sh @@ -38,7 +38,7 @@ trap finish EXIT # wait for the runner container to be up or exit early TIMEOUT=0 -until docker inspect "$k3de2e" | jq ".[0].State.Running" && docker logs "$k3de2e" 2>&1 | grep -i 
"API listen on /var/run/docker.sock"; do +until docker inspect "$k3de2e" | jq ".[0].State.Running" && docker logs "$k3de2e" 2>&1 | grep -qi "API listen on /var/run/docker.sock"; do if [[ $TIMEOUT -eq $RUNNER_START_TIMEOUT ]]; then echo "Failed to start E2E Runner Container in $RUNNER_START_TIMEOUT seconds" exit 1 diff --git a/tests/test_basic.sh b/tests/test_basic.sh index 8969c410..1c934b5f 100755 --- a/tests/test_basic.sh +++ b/tests/test_basic.sh @@ -19,7 +19,7 @@ info "Checking we have access to both clusters..." check_clusters "c1" "c2" || failed "error checking cluster" info "Checking cluster env var with escaped @ signs..." -docker exec k3d-c1-server-0 env | grep -E '^TEST_VAR=user@pass\\$' || failed "Failed to lookup proper env var in container" +docker exec k3d-c1-server-0 env | grep -qE '^TEST_VAR=user@pass\\$' || failed "Failed to lookup proper env var in container" info "Check k3s token retrieval" check_cluster_token_exist "c1" || failed "could not find cluster token c1" diff --git a/tests/test_config_file.sh b/tests/test_config_file.sh index 8793eb44..f10b988d 100755 --- a/tests/test_config_file.sh +++ b/tests/test_config_file.sh @@ -39,7 +39,7 @@ check_multi_node "$clustername" 5 || failed "failed to verify number of nodes" ## Environment Variables info "Ensuring that environment variables are present in the node containers as set in the config (with comma)" -exec_in_node "k3d-$clustername-server-0" "env" | grep "bar=baz,bob" || failed "Expected env var 'bar=baz,bob' is not present in node k3d-$clustername-server-0" +exec_in_node "k3d-$clustername-server-0" "env" | grep -q "bar=baz,bob" || failed "Expected env var 'bar=baz,bob' is not present in node k3d-$clustername-server-0" ## Container Labels info "Ensuring that container labels have been set as stated in the config" @@ -55,8 +55,8 @@ $EXE node list "k3d-$clustername-registry" || failed "Expected k3d-$clustername- ## merged registries.yaml info "Ensuring, that the registries.yaml file contains 
both registries" -exec_in_node "k3d-$clustername-server-0" "cat /etc/rancher/k3s/registries.yaml" | grep -i "my.company.registry" || failed "Expected 'my.company.registry' to be in the /etc/rancher/k3s/registries.yaml" -exec_in_node "k3d-$clustername-server-0" "cat /etc/rancher/k3s/registries.yaml" | grep -i "k3d-$clustername-registry" || failed "Expected 'k3d-$clustername-registry' to be in the /etc/rancher/k3s/registries.yaml" +exec_in_node "k3d-$clustername-server-0" "cat /etc/rancher/k3s/registries.yaml" | grep -qi "my.company.registry" || failed "Expected 'my.company.registry' to be in the /etc/rancher/k3s/registries.yaml" +exec_in_node "k3d-$clustername-server-0" "cat /etc/rancher/k3s/registries.yaml" | grep -qi "k3d-$clustername-registry" || failed "Expected 'k3d-$clustername-registry' to be in the /etc/rancher/k3s/registries.yaml" # Cleanup diff --git a/tests/test_config_with_overrides.sh b/tests/test_config_with_overrides.sh index 8269a326..a533c036 100755 --- a/tests/test_config_with_overrides.sh +++ b/tests/test_config_with_overrides.sh @@ -37,8 +37,8 @@ check_multi_node "$clustername" 6 || failed "failed to verify number of nodes" ## Environment Variables info "Ensuring that environment variables are present in the node containers as set in the config and overrides" -exec_in_node "k3d-$clustername-server-0" "env" | grep "bar=baz" || failed "Expected env var 'bar=baz' is not present in node k3d-$clustername-server-0" -exec_in_node "k3d-$clustername-agent-1" "env" | grep "x=y" || failed "Expected env var 'x=y' is not present in node k3d-$clustername-agent-1" +exec_in_node "k3d-$clustername-server-0" "env" | grep -q "bar=baz" || failed "Expected env var 'bar=baz' is not present in node k3d-$clustername-server-0" +exec_in_node "k3d-$clustername-agent-1" "env" | grep -q "x=y" || failed "Expected env var 'x=y' is not present in node k3d-$clustername-agent-1" ## Container Labels info "Ensuring that container labels have been set as stated in the config" @@ 
-54,8 +54,8 @@ $EXE node list "k3d-$clustername-registry" && failed "Expected k3d-$clustername- ## merged registries.yaml info "Ensuring, that the registries.yaml file contains both registries" -exec_in_node "k3d-$clustername-server-0" "cat /etc/rancher/k3s/registries.yaml" | grep -i "my.company.registry" || failed "Expected 'my.company.registry' to be in the /etc/rancher/k3s/registries.yaml" -exec_in_node "k3d-$clustername-server-0" "cat /etc/rancher/k3s/registries.yaml" | grep -i "k3d-$clustername-registry" && failed "Expected 'k3d-$clustername-registry' to NOT be in the /etc/rancher/k3s/registries.yaml" +exec_in_node "k3d-$clustername-server-0" "cat /etc/rancher/k3s/registries.yaml" | grep -qi "my.company.registry" || failed "Expected 'my.company.registry' to be in the /etc/rancher/k3s/registries.yaml" +exec_in_node "k3d-$clustername-server-0" "cat /etc/rancher/k3s/registries.yaml" | grep -qi "k3d-$clustername-registry" && failed "Expected 'k3d-$clustername-registry' to NOT be in the /etc/rancher/k3s/registries.yaml" # Cleanup diff --git a/tests/test_node_edit.sh b/tests/test_node_edit.sh index 80f896d3..2c6f7f2c 100755 --- a/tests/test_node_edit.sh +++ b/tests/test_node_edit.sh @@ -27,8 +27,8 @@ info "Adding port-mapping to loadbalancer..." $EXE node edit k3d-$clustername-serverlb --port-add $existingPortMappingHostPort:$existingPortMappingContainerPort --port-add $newPortMappingHostPort:$newPortMappingContainerPort || failed "failed to add port-mapping to serverlb in $clustername" info "Checking port-mappings..." 
-docker inspect k3d-$clustername-serverlb --format '{{ range $k, $v := .NetworkSettings.Ports }}{{ printf "%s->%s\n" $k $v }}{{ end }}' | grep -E "^$existingPortMappingContainerPort" || failed "failed to verify pre-existing port-mapping" -docker inspect k3d-$clustername-serverlb --format '{{ range $k, $v := .NetworkSettings.Ports }}{{ printf "%s->%s\n" $k $v }}{{ end }}' | grep -E "^$newPortMappingContainerPort" || failed "failed to verify pre-existing port-mapping" +docker inspect k3d-$clustername-serverlb --format '{{ range $k, $v := .NetworkSettings.Ports }}{{ printf "%s->%s\n" $k $v }}{{ end }}' | grep -qE "^$existingPortMappingContainerPort" || failed "failed to verify pre-existing port-mapping" +docker inspect k3d-$clustername-serverlb --format '{{ range $k, $v := .NetworkSettings.Ports }}{{ printf "%s->%s\n" $k $v }}{{ end }}' | grep -qE "^$newPortMappingContainerPort" || failed "failed to verify pre-existing port-mapping" info "Checking cluster access..." check_clusters "$clustername" || failed "error checking cluster access" From 9ad422ec9ebf1c5b6a3298b673c15818fa45b13f Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Wed, 7 Jul 2021 15:18:29 +0200 Subject: [PATCH 41/46] dockerfile: include a specific yq version that doesn't have issues with control characters when checking the lb config --- Dockerfile | 29 +++++++++++++++++++++++++---- 1 file changed, 25 insertions(+), 4 deletions(-) diff --git a/Dockerfile b/Dockerfile index df9d5021..dcb93e87 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,16 +1,37 @@ +############################################################ +# builder # +# -> golang image used solely for building the k3d binary # +# -> built executable can then be copied into other stages # +############################################################ FROM golang:1.16 as builder ARG GIT_TAG_OVERRIDE WORKDIR /app COPY . . 
RUN make build -e GIT_TAG_OVERRIDE=${GIT_TAG_OVERRIDE} && bin/k3d version +####################################################### +# dind # +# -> k3d + some tools in a docker-in-docker container # +# -> used e.g. in our CI pipelines for testing # +####################################################### FROM docker:20.10-dind as dind -RUN apk update && apk add bash curl sudo jq yq git make netcat-openbsd -RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/`curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt`/bin/linux/amd64/kubectl && \ - chmod +x ./kubectl && \ - mv ./kubectl /usr/local/bin/kubectl + +# install some basic packages needed for testing, etc. +RUN apk update && apk add bash curl sudo jq git make netcat-openbsd + +# install kubectl to interact with the k3d cluster +RUN curl -L https://storage.googleapis.com/kubernetes-release/release/`curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt`/bin/linux/amd64/kubectl -o /usr/local/bin/kubectl && \ + chmod +x /usr/local/bin/kubectl + +# install yq (yaml processor) from source, as the busybox yq had some issues +RUN curl -L https://github.com/mikefarah/yq/releases/download/v4.9.6/yq_linux_amd64 -o /usr/bin/yq &&\ + chmod +x /usr/bin/yq COPY --from=builder /app/bin/k3d /bin/k3d +######################################### +# binary-only # +# -> only the k3d binary.. 
nothing else # +######################################### FROM scratch as binary-only COPY --from=builder /app/bin/k3d /bin/k3d ENTRYPOINT ["/bin/k3d"] \ No newline at end of file From 99490ca7c15d17c60612e5d0e9945fa39f4fd143 Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Wed, 7 Jul 2021 15:19:14 +0200 Subject: [PATCH 42/46] ports: no nodefilter or loadbalancer nodefilter should default to use all servers & agents as upstream --- CHANGELOG.md | 1 + pkg/client/loadbalancer.go | 2 +- pkg/config/transform.go | 43 +++++++++++++++++++++++++++----------- pkg/util/filter.go | 6 ++++-- 4 files changed, 37 insertions(+), 15 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 473948f2..96806758 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ - port-mapping now go via the loadbalancer (serverlb) by default - the `--port` flag has the `proxy` opt (see new nodefilter syntax above) set by default - to leverage the old behavior of direct port-mappings, use the `direct` opt on the port flag + - the nodefilter `loadbalancer` will now do the same as `servers:*;agents:*` (proxied via the loadbalancer) ### Fixes diff --git a/pkg/client/loadbalancer.go b/pkg/client/loadbalancer.go index fe1989b4..c6bec707 100644 --- a/pkg/client/loadbalancer.go +++ b/pkg/client/loadbalancer.go @@ -139,7 +139,7 @@ func GetLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, cluste file = bytes.Trim(file[512:], "\x00") // trim control characters, etc. 
if err := yaml.Unmarshal(file, &cfg); err != nil { - return cfg, err + return cfg, fmt.Errorf("error unmarshalling loadbalancer config: %w", err) } return cfg, nil diff --git a/pkg/config/transform.go b/pkg/config/transform.go index cc812374..b1850d14 100644 --- a/pkg/config/transform.go +++ b/pkg/config/transform.go @@ -44,6 +44,10 @@ import ( log "github.com/sirupsen/logrus" ) +var ( + DefaultTargetsNodefiltersPortMappings = []string{"servers:*:proxy", "agents:*:proxy"} +) + // TransformSimpleToClusterConfig transforms a simple configuration to a full-fledged cluster configuration func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtime, simpleConfig conf.SimpleConfig) (*conf.ClusterConfig, error) { @@ -171,8 +175,18 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim // -> PORTS for _, portWithNodeFilters := range simpleConfig.Ports { + log.Tracef("inspecting port mapping for %s with nodefilters %s", portWithNodeFilters.Port, portWithNodeFilters.NodeFilters) if len(portWithNodeFilters.NodeFilters) == 0 && nodeCount > 1 { - return nil, fmt.Errorf("Portmapping '%s' lacks a node filter, but there's more than one node", portWithNodeFilters.Port) + log.Infof("portmapping '%s' lacks a nodefilter, but there's more than one node: defaulting to %s", portWithNodeFilters.Port, DefaultTargetsNodefiltersPortMappings) + portWithNodeFilters.NodeFilters = DefaultTargetsNodefiltersPortMappings + } + + for _, f := range portWithNodeFilters.NodeFilters { + if strings.HasPrefix(f, "loadbalancer") { + log.Infof("portmapping '%s' targets the loadbalancer: defaulting to %s", portWithNodeFilters.Port, DefaultTargetsNodefiltersPortMappings) + portWithNodeFilters.NodeFilters = DefaultTargetsNodefiltersPortMappings + break + } } filteredNodes, err := util.FilterNodesWithSuffix(nodeList, portWithNodeFilters.NodeFilters) @@ -180,23 +194,18 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim return nil, 
err } + nn := "" + for _, n := range filteredNodes["proxy"] { + nn = strings.Join([]string{nn, n.Name}, ",") + } + log.Debugf("Filtered nodes: %#v", nn) + for suffix, nodes := range filteredNodes { portmappings, err := nat.ParsePortSpec(portWithNodeFilters.Port) if err != nil { return nil, fmt.Errorf("error parsing port spec '%s': %+v", portWithNodeFilters.Port, err) } - for _, n := range nodes { - if n.Role == k3d.LoadBalancerRole && n.Name == newCluster.ServerLoadBalancer.Node.Name { - log.Infoln("loadbalancer in filtered list for port mappings: defaulting to all servers and agents as upstream targets") - var err error - nodes, err = util.FilterNodes(newCluster.Nodes, []string{"agents:*", "servers:*"}) - if err != nil { - return nil, err - } - } - } - if suffix == "proxy" || suffix == util.NodeFilterSuffixNone { // proxy is the default suffix for port mappings if newCluster.ServerLoadBalancer == nil { return nil, fmt.Errorf("port-mapping of type 'proxy' specified, but loadbalancer is disabled") @@ -222,6 +231,16 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim } + // print generated loadbalancer config + if log.GetLevel() >= log.DebugLevel { + yamlized, err := yaml.Marshal(newCluster.ServerLoadBalancer.Config) + if err != nil { + log.Errorf("error printing loadbalancer config: %v", err) + } else { + log.Debugf("generated loadbalancer config:\n%s", string(yamlized)) + } + } + // -> K3S NODE LABELS for _, k3sNodeLabelWithNodeFilters := range simpleConfig.Options.K3sOptions.NodeLabels { if len(k3sNodeLabelWithNodeFilters.NodeFilters) == 0 && nodeCount > 1 { diff --git a/pkg/util/filter.go b/pkg/util/filter.go index 7ddb04ba..1f9e5d70 100644 --- a/pkg/util/filter.go +++ b/pkg/util/filter.go @@ -77,7 +77,9 @@ func FilterNodesWithSuffix(nodes []*k3d.Node, nodefilters []string) (map[string] suffix = sf } - result[suffix] = make([]*k3d.Node, 0) // init map for this suffix + if _, ok := result[suffix]; !ok { + result[suffix] = 
make([]*k3d.Node, 0) // init map for this suffix, if not exists + } filteredNodes, err := FilterNodes(nodes, []string{nf}) if err != nil { @@ -86,7 +88,7 @@ func FilterNodesWithSuffix(nodes []*k3d.Node, nodefilters []string) (map[string] log.Tracef("Filtered %d nodes for suffix '%s' (filter: %s)", len(filteredNodes), suffix, nf) - result[suffix] = filteredNodes + result[suffix] = append(result[suffix], filteredNodes...) } return result, nil From bb237d3389e44ef975091268aa121c75b761f0fb Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Wed, 7 Jul 2021 15:52:18 +0200 Subject: [PATCH 43/46] fix: do not allow the same direct port-mapping on more than one node --- cmd/cluster/clusterCreate.go | 4 ---- pkg/config/transform.go | 3 +++ 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/cmd/cluster/clusterCreate.go b/cmd/cluster/clusterCreate.go index 543084d7..0fb8ce3e 100644 --- a/cmd/cluster/clusterCreate.go +++ b/cmd/cluster/clusterCreate.go @@ -484,10 +484,6 @@ func applyCLIOverrides(cfg conf.SimpleConfig) (conf.SimpleConfig, error) { log.Fatalln(err) } - if len(filters) > 1 { - log.Fatalln("Can only apply a Portmap to one node") - } - // create new entry or append filter to existing entry if _, exists := portFilterMap[portmap]; exists { log.Fatalln("Same Portmapping can not be used for multiple nodes") diff --git a/pkg/config/transform.go b/pkg/config/transform.go index b1850d14..fda040c5 100644 --- a/pkg/config/transform.go +++ b/pkg/config/transform.go @@ -219,6 +219,9 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim } } } else if suffix == "direct" { + if len(nodes) > 1 { + return nil, fmt.Errorf("error: cannot apply a direct port-mapping (%s) to more than one node", portmappings) + } for _, node := range nodes { if err := addPortMappings(node, portmappings); err != nil { return nil, err From 2162504ee16ccbc80ff788762a9cf45a975293ca Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Wed, 7 Jul 2021 16:38:05 +0200 Subject: 
[PATCH 44/46] transformsimple: move transformports to own function --- pkg/config/transform.go | 150 +++++++++++++++++++++------------------- 1 file changed, 80 insertions(+), 70 deletions(-) diff --git a/pkg/config/transform.go b/pkg/config/transform.go index fda040c5..5df31ace 100644 --- a/pkg/config/transform.go +++ b/pkg/config/transform.go @@ -158,10 +158,10 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim /**************************** * Extra Node Configuration * ****************************/ - - // -> VOLUMES nodeCount := len(newCluster.Nodes) nodeList := newCluster.Nodes + + // -> VOLUMES for _, volumeWithNodeFilters := range simpleConfig.Volumes { nodes, err := util.FilterNodes(nodeList, volumeWithNodeFilters.NodeFilters) if err != nil { @@ -174,74 +174,8 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim } // -> PORTS - for _, portWithNodeFilters := range simpleConfig.Ports { - log.Tracef("inspecting port mapping for %s with nodefilters %s", portWithNodeFilters.Port, portWithNodeFilters.NodeFilters) - if len(portWithNodeFilters.NodeFilters) == 0 && nodeCount > 1 { - log.Infof("portmapping '%s' lacks a nodefilter, but there's more than one node: defaulting to %s", portWithNodeFilters.Port, DefaultTargetsNodefiltersPortMappings) - portWithNodeFilters.NodeFilters = DefaultTargetsNodefiltersPortMappings - } - - for _, f := range portWithNodeFilters.NodeFilters { - if strings.HasPrefix(f, "loadbalancer") { - log.Infof("portmapping '%s' targets the loadbalancer: defaulting to %s", portWithNodeFilters.Port, DefaultTargetsNodefiltersPortMappings) - portWithNodeFilters.NodeFilters = DefaultTargetsNodefiltersPortMappings - break - } - } - - filteredNodes, err := util.FilterNodesWithSuffix(nodeList, portWithNodeFilters.NodeFilters) - if err != nil { - return nil, err - } - - nn := "" - for _, n := range filteredNodes["proxy"] { - nn = strings.Join([]string{nn, n.Name}, ",") - } - 
log.Debugf("Filtered nodes: %#v", nn) - - for suffix, nodes := range filteredNodes { - portmappings, err := nat.ParsePortSpec(portWithNodeFilters.Port) - if err != nil { - return nil, fmt.Errorf("error parsing port spec '%s': %+v", portWithNodeFilters.Port, err) - } - - if suffix == "proxy" || suffix == util.NodeFilterSuffixNone { // proxy is the default suffix for port mappings - if newCluster.ServerLoadBalancer == nil { - return nil, fmt.Errorf("port-mapping of type 'proxy' specified, but loadbalancer is disabled") - } - if err := addPortMappings(newCluster.ServerLoadBalancer.Node, portmappings); err != nil { - return nil, err - } - for _, pm := range portmappings { - if err := loadbalancerAddPortConfigs(newCluster.ServerLoadBalancer, pm, nodes); err != nil { - return nil, err - } - } - } else if suffix == "direct" { - if len(nodes) > 1 { - return nil, fmt.Errorf("error: cannot apply a direct port-mapping (%s) to more than one node", portmappings) - } - for _, node := range nodes { - if err := addPortMappings(node, portmappings); err != nil { - return nil, err - } - } - } else if suffix != util.NodeFilterMapKeyAll { - return nil, fmt.Errorf("error adding port mappings: unknown suffix %s", suffix) - } - } - - } - - // print generated loadbalancer config - if log.GetLevel() >= log.DebugLevel { - yamlized, err := yaml.Marshal(newCluster.ServerLoadBalancer.Config) - if err != nil { - log.Errorf("error printing loadbalancer config: %v", err) - } else { - log.Debugf("generated loadbalancer config:\n%s", string(yamlized)) - } + if err := TransformPorts(ctx, runtime, &newCluster, simpleConfig.Ports); err != nil { + return nil, err } // -> K3S NODE LABELS @@ -456,3 +390,79 @@ nodenameLoop: return nil } + +func TransformPorts(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.Cluster, portsWithNodeFilters []conf.PortWithNodeFilters) error { + nodeCount := len(cluster.Nodes) + nodeList := cluster.Nodes + + for _, portWithNodeFilters := range portsWithNodeFilters { 
+ log.Tracef("inspecting port mapping for %s with nodefilters %s", portWithNodeFilters.Port, portWithNodeFilters.NodeFilters) + if len(portWithNodeFilters.NodeFilters) == 0 && nodeCount > 1 { + log.Infof("portmapping '%s' lacks a nodefilter, but there's more than one node: defaulting to %s", portWithNodeFilters.Port, DefaultTargetsNodefiltersPortMappings) + portWithNodeFilters.NodeFilters = DefaultTargetsNodefiltersPortMappings + } + + for _, f := range portWithNodeFilters.NodeFilters { + if strings.HasPrefix(f, "loadbalancer") { + log.Infof("portmapping '%s' targets the loadbalancer: defaulting to %s", portWithNodeFilters.Port, DefaultTargetsNodefiltersPortMappings) + portWithNodeFilters.NodeFilters = DefaultTargetsNodefiltersPortMappings + break + } + } + + filteredNodes, err := util.FilterNodesWithSuffix(nodeList, portWithNodeFilters.NodeFilters) + if err != nil { + return err + } + + nn := "" + for _, n := range filteredNodes["proxy"] { + nn = strings.Join([]string{nn, n.Name}, ",") + } + log.Debugf("Filtered nodes: %#v", nn) + + for suffix, nodes := range filteredNodes { + portmappings, err := nat.ParsePortSpec(portWithNodeFilters.Port) + if err != nil { + return fmt.Errorf("error parsing port spec '%s': %+v", portWithNodeFilters.Port, err) + } + + if suffix == "proxy" || suffix == util.NodeFilterSuffixNone { // proxy is the default suffix for port mappings + if cluster.ServerLoadBalancer == nil { + return fmt.Errorf("port-mapping of type 'proxy' specified, but loadbalancer is disabled") + } + if err := addPortMappings(cluster.ServerLoadBalancer.Node, portmappings); err != nil { + return err + } + for _, pm := range portmappings { + if err := loadbalancerAddPortConfigs(cluster.ServerLoadBalancer, pm, nodes); err != nil { + return err + } + } + } else if suffix == "direct" { + if len(nodes) > 1 { + return fmt.Errorf("error: cannot apply a direct port-mapping (%s) to more than one node", portmappings) + } + for _, node := range nodes { + if err := 
addPortMappings(node, portmappings); err != nil { + return err + } + } + } else if suffix != util.NodeFilterMapKeyAll { + return fmt.Errorf("error adding port mappings: unknown suffix %s", suffix) + } + } + + } + + // print generated loadbalancer config + if log.GetLevel() >= log.DebugLevel { + yamlized, err := yaml.Marshal(cluster.ServerLoadBalancer.Config) + if err != nil { + log.Errorf("error printing loadbalancer config: %v", err) + } else { + log.Debugf("generated loadbalancer config:\n%s", string(yamlized)) + } + } + return nil +} From 831e58fdce514cf6a5d0bedf970388085e4eab92 Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Wed, 7 Jul 2021 17:12:00 +0200 Subject: [PATCH 45/46] init clusteredit --- cmd/cluster/clusterEdit.go | 120 +++++++++++++++++++++++++++++++++++++ pkg/client/cluster.go | 69 +++++++++++++++++++++ pkg/config/transform.go | 25 +++----- pkg/config/types/types.go | 5 ++ 4 files changed, 202 insertions(+), 17 deletions(-) create mode 100644 cmd/cluster/clusterEdit.go diff --git a/cmd/cluster/clusterEdit.go b/cmd/cluster/clusterEdit.go new file mode 100644 index 00000000..be514fca --- /dev/null +++ b/cmd/cluster/clusterEdit.go @@ -0,0 +1,120 @@ +/* +Copyright © 2020-2021 The k3d Author(s) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +*/ +package cluster + +import ( + "github.com/rancher/k3d/v4/cmd/util" + cliutil "github.com/rancher/k3d/v4/cmd/util" + "github.com/rancher/k3d/v4/pkg/client" + conf "github.com/rancher/k3d/v4/pkg/config/v1alpha3" + "github.com/rancher/k3d/v4/pkg/runtimes" + k3d "github.com/rancher/k3d/v4/pkg/types" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" +) + +// NewCmdNodeEdit returns a new cobra command +func NewCmdNodeEdit() *cobra.Command { + + // create new cobra command + cmd := &cobra.Command{ + Use: "edit NAME", + Short: "[EXPERIMENTAL] Edit cluster(s).", + Long: `[EXPERIMENTAL] Edit cluster(s).`, + Args: cobra.ExactArgs(1), + Aliases: []string{"update"}, + ValidArgsFunction: util.ValidArgsAvailableNodes, + Run: func(cmd *cobra.Command, args []string) { + + existingCluster, changeset := parseEditClusterCmd(cmd, args) + + log.Debugf("===== Current =====\n%+v\n===== Changeset =====\n%+v\n", existingCluster, changeset) + + log.Infof("Successfully updated %s", existingCluster.Name) + + }, + } + + // add subcommands + + // add flags + cmd.Flags().StringArray("port-add", nil, "[EXPERIMENTAL] Map ports from the node containers (via the serverlb) to the host (Format: `[HOST:][HOSTPORT:]CONTAINERPORT[/PROTOCOL][@NODEFILTER]`)\n - Example: `k3d node edit k3d-mycluster-serverlb --port-add 8080:80`") + + // done + return cmd +} + +// parseEditClusterCmd parses the command input into variables required to delete nodes +func parseEditClusterCmd(cmd *cobra.Command, args []string) (*k3d.Cluster, 
*conf.SimpleConfig) { + + existingCluster, err := client.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: args[0]}) + if err != nil { + log.Fatalln(err) + } + + if existingCluster == nil { + log.Infof("Cluster %s not found", args[0]) + return nil, nil + } + + changeset := conf.SimpleConfig{} + + /* + * --port-add + */ + portFlags, err := cmd.Flags().GetStringArray("port-add") + if err != nil { + log.Errorln(err) + return nil, nil + } + + // init portmap + changeset.Ports = []conf.PortWithNodeFilters{} + + portFilterMap := make(map[string][]string, 1) + for _, portFlag := range portFlags { + + // split node filter from the specified volume + portmap, filters, err := cliutil.SplitFiltersFromFlag(portFlag) + if err != nil { + log.Fatalln(err) + } + + // create new entry or append filter to existing entry + if _, exists := portFilterMap[portmap]; exists { + log.Fatalln("Same Portmapping can not be used for multiple nodes") + } else { + portFilterMap[portmap] = filters + } + } + + for port, nodeFilters := range portFilterMap { + changeset.Ports = append(changeset.Ports, conf.PortWithNodeFilters{ + Port: port, + NodeFilters: nodeFilters, + }) + } + + log.Tracef("PortFilterMap: %+v", portFilterMap) + + return existingCluster, &changeset +} diff --git a/pkg/client/cluster.go b/pkg/client/cluster.go index b387d722..c626c790 100644 --- a/pkg/client/cluster.go +++ b/pkg/client/cluster.go @@ -29,6 +29,7 @@ import ( "io/ioutil" "sort" "strconv" + "strings" "time" gort "runtime" @@ -36,6 +37,7 @@ import ( "github.com/docker/go-connections/nat" "github.com/imdario/mergo" "github.com/rancher/k3d/v4/pkg/actions" + conftypes "github.com/rancher/k3d/v4/pkg/config/types" config "github.com/rancher/k3d/v4/pkg/config/v1alpha3" k3drt "github.com/rancher/k3d/v4/pkg/runtimes" "github.com/rancher/k3d/v4/pkg/runtimes/docker" @@ -779,6 +781,26 @@ func ClusterGet(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster if !overwroteExisting { cluster.Nodes = 
append(cluster.Nodes, node) } + + } + + // Loadbalancer + if cluster.ServerLoadBalancer == nil { + for _, node := range cluster.Nodes { + if node.Role == k3d.LoadBalancerRole { + cluster.ServerLoadBalancer = &k3d.Loadbalancer{ + Node: node, + } + } + } + + if cluster.ServerLoadBalancer.Node != nil { + lbcfg, err := GetLoadbalancerConfig(ctx, runtime, cluster) + if err != nil { + return cluster, fmt.Errorf("error getting loadbalancer config for cluster %s: %w", cluster.Name, err) + } + cluster.ServerLoadBalancer.Config = &lbcfg + } } if err := populateClusterFieldsFromLabels(cluster); err != nil { @@ -1009,3 +1031,50 @@ func prepCreateLocalRegistryHostingConfigMap(ctx context.Context, runtime k3drt. } return nil } + +// ClusterEditChangesetSimple modifies an existing cluster with a given SimpleConfig changeset +func ClusterEditChangesetSimple(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster, changeset *config.SimpleConfig) error { + nodeCount := len(cluster.Nodes) + nodeList := cluster.Nodes + // === Ports === + + existingLB := cluster.ServerLoadBalancer + lbChangeset := + + if len(changeset.Ports) > 0 { + for _, portWithNodeFilters := range changeset.Ports { + log.Tracef("inspecting port mapping for %s with nodefilters %s", portWithNodeFilters.Port, portWithNodeFilters.NodeFilters) + if len(portWithNodeFilters.NodeFilters) == 0 && nodeCount > 1 { + log.Infof("portmapping '%s' lacks a nodefilter, but there's more than one node: defaulting to %s", portWithNodeFilters.Port, conftypes.DefaultTargetsNodefiltersPortMappings) + portWithNodeFilters.NodeFilters = conftypes.DefaultTargetsNodefiltersPortMappings + } + + for _, f := range portWithNodeFilters.NodeFilters { + if strings.HasPrefix(f, "loadbalancer") { + log.Infof("portmapping '%s' targets the loadbalancer: defaulting to %s", portWithNodeFilters.Port, conftypes.DefaultTargetsNodefiltersPortMappings) + portWithNodeFilters.NodeFilters = conftypes.DefaultTargetsNodefiltersPortMappings + break + } + } 
+ + filteredNodes, err := util.FilterNodesWithSuffix(nodeList, portWithNodeFilters.NodeFilters) + if err != nil { + return err + } + + for suffix, nodes := range filteredNodes { + switch suffix { + case "proxy", util.NodeFilterSuffixNone: + break + case util.NodeFilterMapKeyAll: + break + default: + return fmt.Errorf("error: 'cluster edit' does not (yet) support the '%s' opt/suffix for adding ports", suffix) + } + } + + } + } + + return nil +} diff --git a/pkg/config/transform.go b/pkg/config/transform.go index 5df31ace..1ceec0a5 100644 --- a/pkg/config/transform.go +++ b/pkg/config/transform.go @@ -32,6 +32,7 @@ import ( "github.com/docker/go-connections/nat" cliutil "github.com/rancher/k3d/v4/cmd/util" // TODO: move parseapiport to pkg "github.com/rancher/k3d/v4/pkg/client" + "github.com/rancher/k3d/v4/pkg/config/types" conf "github.com/rancher/k3d/v4/pkg/config/v1alpha3" "github.com/rancher/k3d/v4/pkg/runtimes" k3d "github.com/rancher/k3d/v4/pkg/types" @@ -44,10 +45,6 @@ import ( log "github.com/sirupsen/logrus" ) -var ( - DefaultTargetsNodefiltersPortMappings = []string{"servers:*:proxy", "agents:*:proxy"} -) - // TransformSimpleToClusterConfig transforms a simple configuration to a full-fledged cluster configuration func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtime, simpleConfig conf.SimpleConfig) (*conf.ClusterConfig, error) { @@ -362,10 +359,10 @@ func addPortMappings(node *k3d.Node, portmappings []nat.PortMapping) error { return nil } -func loadbalancerAddPortConfigs(loadbalancer *k3d.Loadbalancer, pm nat.PortMapping, nodes []*k3d.Node) error { - portconfig := fmt.Sprintf("%s.%s", pm.Port.Port(), pm.Port.Proto()) +func loadbalancerAddPortConfigs(loadbalancer *k3d.Loadbalancer, portmapping nat.PortMapping, targetNodes []*k3d.Node) error { + portconfig := fmt.Sprintf("%s.%s", portmapping.Port.Port(), portmapping.Port.Proto()) nodenames := []string{} - for _, node := range nodes { + for _, node := range targetNodes { if 
node.Role == k3d.LoadBalancerRole { return fmt.Errorf("error adding port config to loadbalancer: cannot add port config referencing the loadbalancer itself (loop)") } @@ -398,14 +395,14 @@ func TransformPorts(ctx context.Context, runtime runtimes.Runtime, cluster *k3d. for _, portWithNodeFilters := range portsWithNodeFilters { log.Tracef("inspecting port mapping for %s with nodefilters %s", portWithNodeFilters.Port, portWithNodeFilters.NodeFilters) if len(portWithNodeFilters.NodeFilters) == 0 && nodeCount > 1 { - log.Infof("portmapping '%s' lacks a nodefilter, but there's more than one node: defaulting to %s", portWithNodeFilters.Port, DefaultTargetsNodefiltersPortMappings) - portWithNodeFilters.NodeFilters = DefaultTargetsNodefiltersPortMappings + log.Infof("portmapping '%s' lacks a nodefilter, but there's more than one node: defaulting to %s", portWithNodeFilters.Port, types.DefaultTargetsNodefiltersPortMappings) + portWithNodeFilters.NodeFilters = types.DefaultTargetsNodefiltersPortMappings } for _, f := range portWithNodeFilters.NodeFilters { if strings.HasPrefix(f, "loadbalancer") { - log.Infof("portmapping '%s' targets the loadbalancer: defaulting to %s", portWithNodeFilters.Port, DefaultTargetsNodefiltersPortMappings) - portWithNodeFilters.NodeFilters = DefaultTargetsNodefiltersPortMappings + log.Infof("portmapping '%s' targets the loadbalancer: defaulting to %s", portWithNodeFilters.Port, types.DefaultTargetsNodefiltersPortMappings) + portWithNodeFilters.NodeFilters = types.DefaultTargetsNodefiltersPortMappings break } } @@ -415,12 +412,6 @@ func TransformPorts(ctx context.Context, runtime runtimes.Runtime, cluster *k3d. 
return err } - nn := "" - for _, n := range filteredNodes["proxy"] { - nn = strings.Join([]string{nn, n.Name}, ",") - } - log.Debugf("Filtered nodes: %#v", nn) - for suffix, nodes := range filteredNodes { portmappings, err := nat.ParsePortSpec(portWithNodeFilters.Port) if err != nil { diff --git a/pkg/config/types/types.go b/pkg/config/types/types.go index ff0e26e5..89e440c4 100644 --- a/pkg/config/types/types.go +++ b/pkg/config/types/types.go @@ -32,3 +32,8 @@ type Config interface { GetKind() string GetAPIVersion() string } + +// Default Targets for NodeFilters +var ( + DefaultTargetsNodefiltersPortMappings = []string{"servers:*:proxy", "agents:*:proxy"} +) From 2516cad12e83a7b8d7e233481f651160b9811522 Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Fri, 9 Jul 2021 12:08:30 +0200 Subject: [PATCH 46/46] allow for adding ports to a running cluster via the loadbalancer --- cmd/cluster/cluster.go | 1 + cmd/cluster/clusterEdit.go | 8 ++- pkg/client/cluster.go | 75 +++++++++++++++------- pkg/client/loadbalancer.go | 40 ++++++++++-- pkg/client/node.go | 4 +- pkg/client/ports.go | 127 +++++++++++++++++++++++++++++++++++++ pkg/config/transform.go | 117 +--------------------------------- 7 files changed, 225 insertions(+), 147 deletions(-) create mode 100644 pkg/client/ports.go diff --git a/cmd/cluster/cluster.go b/cmd/cluster/cluster.go index b932da6a..b1ed94cc 100644 --- a/cmd/cluster/cluster.go +++ b/cmd/cluster/cluster.go @@ -48,6 +48,7 @@ func NewCmdCluster() *cobra.Command { cmd.AddCommand(NewCmdClusterStop()) cmd.AddCommand(NewCmdClusterDelete()) cmd.AddCommand(NewCmdClusterList()) + cmd.AddCommand(NewCmdClusterEdit()) // add flags diff --git a/cmd/cluster/clusterEdit.go b/cmd/cluster/clusterEdit.go index be514fca..5c1c2e75 100644 --- a/cmd/cluster/clusterEdit.go +++ b/cmd/cluster/clusterEdit.go @@ -32,8 +32,8 @@ import ( "github.com/spf13/cobra" ) -// NewCmdNodeEdit returns a new cobra command -func NewCmdNodeEdit() *cobra.Command { +// NewCmdClusterEdit returns a 
new cobra command +func NewCmdClusterEdit() *cobra.Command { // create new cobra command cmd := &cobra.Command{ @@ -49,6 +49,10 @@ func NewCmdNodeEdit() *cobra.Command { log.Debugf("===== Current =====\n%+v\n===== Changeset =====\n%+v\n", existingCluster, changeset) + if err := client.ClusterEditChangesetSimple(cmd.Context(), runtimes.SelectedRuntime, existingCluster, changeset); err != nil { + log.Fatalf("Failed to update the cluster: %v", err) + } + log.Infof("Successfully updated %s", existingCluster.Name) }, diff --git a/pkg/client/cluster.go b/pkg/client/cluster.go index 2c82ea57..4e7d68aa 100644 --- a/pkg/client/cluster.go +++ b/pkg/client/cluster.go @@ -35,8 +35,8 @@ import ( "github.com/docker/go-connections/nat" "github.com/imdario/mergo" + copystruct "github.com/mitchellh/copystructure" "github.com/rancher/k3d/v4/pkg/actions" - conftypes "github.com/rancher/k3d/v4/pkg/config/types" config "github.com/rancher/k3d/v4/pkg/config/v1alpha3" k3drt "github.com/rancher/k3d/v4/pkg/runtimes" "github.com/rancher/k3d/v4/pkg/runtimes/docker" @@ -1033,47 +1033,78 @@ func prepCreateLocalRegistryHostingConfigMap(ctx context.Context, runtime k3drt. 
// ClusterEditChangesetSimple modifies an existing cluster with a given SimpleConfig changeset func ClusterEditChangesetSimple(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster, changeset *config.SimpleConfig) error { - nodeCount := len(cluster.Nodes) + // nodeCount := len(cluster.Nodes) nodeList := cluster.Nodes + // === Ports === existingLB := cluster.ServerLoadBalancer - lbChangeset := + lbChangeset := &k3d.Loadbalancer{} - if len(changeset.Ports) > 0 { - for _, portWithNodeFilters := range changeset.Ports { - log.Tracef("inspecting port mapping for %s with nodefilters %s", portWithNodeFilters.Port, portWithNodeFilters.NodeFilters) - if len(portWithNodeFilters.NodeFilters) == 0 && nodeCount > 1 { - log.Infof("portmapping '%s' lacks a nodefilter, but there's more than one node: defaulting to %s", portWithNodeFilters.Port, conftypes.DefaultTargetsNodefiltersPortMappings) - portWithNodeFilters.NodeFilters = conftypes.DefaultTargetsNodefiltersPortMappings - } + // copy existing loadbalancer + lbChangesetNode, err := CopyNode(ctx, existingLB.Node, CopyNodeOpts{keepState: false}) + if err != nil { + return fmt.Errorf("error copying existing loadbalancer: %w", err) + } - for _, f := range portWithNodeFilters.NodeFilters { - if strings.HasPrefix(f, "loadbalancer") { - log.Infof("portmapping '%s' targets the loadbalancer: defaulting to %s", portWithNodeFilters.Port, conftypes.DefaultTargetsNodefiltersPortMappings) - portWithNodeFilters.NodeFilters = conftypes.DefaultTargetsNodefiltersPortMappings - break - } - } + lbChangeset.Node = lbChangesetNode + // copy config from existing loadbalancer + lbChangesetConfig, err := copystruct.Copy(existingLB.Config) + if err != nil { + return fmt.Errorf("error copying config from existing loadbalancer: %w", err) + } + + lbChangeset.Config = lbChangesetConfig.(*k3d.LoadbalancerConfig) + + // loop over ports + if len(changeset.Ports) > 0 { + // 1. 
ensure that there are only supported suffices in the node filters // TODO: overly complex right now, needs simplification + for _, portWithNodeFilters := range changeset.Ports { filteredNodes, err := util.FilterNodesWithSuffix(nodeList, portWithNodeFilters.NodeFilters) if err != nil { return err } - for suffix, nodes := range filteredNodes { + for suffix := range filteredNodes { switch suffix { - case "proxy", util.NodeFilterSuffixNone: - break - case util.NodeFilterMapKeyAll: - break + case "proxy", util.NodeFilterSuffixNone, util.NodeFilterMapKeyAll: + continue default: return fmt.Errorf("error: 'cluster edit' does not (yet) support the '%s' opt/suffix for adding ports", suffix) } } + } + // 2. transform + cluster.ServerLoadBalancer = lbChangeset // we're working with pointers, so let's point to the changeset here to not update the original that we keep as a reference + if err := TransformPorts(ctx, runtime, cluster, changeset.Ports); err != nil { + return fmt.Errorf("error transforming port config %s: %w", changeset.Ports, err) } } + log.Debugf("ORIGINAL:\n> Ports: %+v\n> Config: %+v\nCHANGESET:\n> Ports: %+v\n> Config: %+v", existingLB.Node.Ports, existingLB.Config, lbChangeset.Node.Ports, lbChangeset.Config) + + // prepare to write config to lb container + configyaml, err := yaml.Marshal(lbChangeset.Config) + if err != nil { + return err + } + writeLbConfigAction := k3d.NodeHook{ + Stage: k3d.LifecycleStagePreStart, + Action: actions.WriteFileAction{ + Runtime: runtime, + Dest: k3d.DefaultLoadbalancerConfigPath, + Mode: 0744, + Content: configyaml, + }, + } + if lbChangeset.Node.HookActions == nil { + lbChangeset.Node.HookActions = []k3d.NodeHook{} + } + lbChangeset.Node.HookActions = append(lbChangeset.Node.HookActions, writeLbConfigAction) + + NodeReplace(ctx, runtime, existingLB.Node, lbChangeset.Node) + return nil } diff --git a/pkg/client/loadbalancer.go b/pkg/client/loadbalancer.go index c6bec707..028d30c8 100644 --- a/pkg/client/loadbalancer.go +++ 
b/pkg/client/loadbalancer.go @@ -39,8 +39,9 @@ import ( ) var ( - LBConfigErrHostNotFound = errors.New("lbconfig: host not found") - LBConfigErrFailedTest = errors.New("lbconfig: failed to test") + ErrLBConfigHostNotFound error = errors.New("lbconfig: host not found") + ErrLBConfigFailedTest error = errors.New("lbconfig: failed to test") + ErrLBConfigEntryExists error = errors.New("lbconfig: entry exists in config") ) // UpdateLoadbalancerConfig updates the loadbalancer config with an updated list of servers belonging to that cluster @@ -91,14 +92,14 @@ func UpdateLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, clu err = NodeWaitForLogMessage(failureCtx, runtime, cluster.ServerLoadBalancer.Node, "host not found in upstream", startTime) if err != nil { log.Warnf("Failed to check if the loadbalancer was configured correctly or if it broke. Please check it manually or try again: %v", err) - return LBConfigErrFailedTest + return ErrLBConfigFailedTest } else { log.Warnln("Failed to configure loadbalancer because one of the nodes seems to be down! Run `k3d node list` to see which one it could be.") - return LBConfigErrHostNotFound + return ErrLBConfigHostNotFound } } else { log.Warnf("Failed to ensure that loadbalancer was configured correctly. 
Please check it manually or try again: %v", err) - return LBConfigErrFailedTest + return ErrLBConfigFailedTest } } log.Infof("Successfully configured loadbalancer %s!", cluster.ServerLoadBalancer.Node.Name) @@ -206,3 +207,32 @@ func LoadbalancerPrepare(ctx context.Context, runtime runtimes.Runtime, cluster return lbNode, nil } + +func loadbalancerAddPortConfigs(loadbalancer *k3d.Loadbalancer, portmapping nat.PortMapping, targetNodes []*k3d.Node) error { + portconfig := fmt.Sprintf("%s.%s", portmapping.Port.Port(), portmapping.Port.Proto()) + nodenames := []string{} + for _, node := range targetNodes { + if node.Role == k3d.LoadBalancerRole { + return fmt.Errorf("error adding port config to loadbalancer: cannot add port config referencing the loadbalancer itself (loop)") + } + nodenames = append(nodenames, node.Name) + } + + // entry for that port doesn't exist yet, so we simply create it with the list of node names + if _, ok := loadbalancer.Config.Ports[portconfig]; !ok { + loadbalancer.Config.Ports[portconfig] = nodenames + return nil + } + +nodenameLoop: + for _, nodename := range nodenames { + for _, existingNames := range loadbalancer.Config.Ports[portconfig] { + if nodename == existingNames { + continue nodenameLoop + } + loadbalancer.Config.Ports[portconfig] = append(loadbalancer.Config.Ports[portconfig], nodename) + } + } + + return nil +} diff --git a/pkg/client/node.go b/pkg/client/node.go index 09e83cf0..2c8213b1 100644 --- a/pkg/client/node.go +++ b/pkg/client/node.go @@ -209,7 +209,7 @@ func NodeAddToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.N if node.Role == k3d.ServerRole { log.Infoln("Updating loadbalancer config to include new server node(s)") if err := UpdateLoadbalancerConfig(ctx, runtime, cluster); err != nil { - if !errors.Is(err, LBConfigErrHostNotFound) { + if !errors.Is(err, ErrLBConfigHostNotFound) { return fmt.Errorf("error updating loadbalancer: %w", err) } } @@ -473,7 +473,7 @@ func NodeDelete(ctx context.Context, 
runtime runtimes.Runtime, node *k3d.Node, o // if it's a server node, then update the loadbalancer configuration if node.Role == k3d.ServerRole { if err := UpdateLoadbalancerConfig(ctx, runtime, cluster); err != nil { - if !errors.Is(err, LBConfigErrHostNotFound) { + if !errors.Is(err, ErrLBConfigHostNotFound) { return fmt.Errorf("Failed to update cluster loadbalancer: %w", err) } } diff --git a/pkg/client/ports.go b/pkg/client/ports.go new file mode 100644 index 00000000..2885d1a7 --- /dev/null +++ b/pkg/client/ports.go @@ -0,0 +1,127 @@ +/* +Copyright © 2020-2021 The k3d Author(s) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+*/ +package client + +import ( + "context" + "errors" + "fmt" + "strings" + + "github.com/docker/go-connections/nat" + "github.com/rancher/k3d/v4/pkg/config/types" + config "github.com/rancher/k3d/v4/pkg/config/v1alpha3" + "github.com/rancher/k3d/v4/pkg/runtimes" + k3d "github.com/rancher/k3d/v4/pkg/types" + "github.com/rancher/k3d/v4/pkg/util" + log "github.com/sirupsen/logrus" + "gopkg.in/yaml.v2" +) + +var ( + ErrNodeAddPortsExists error = errors.New("port exists on target") +) + +func TransformPorts(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.Cluster, portsWithNodeFilters []config.PortWithNodeFilters) error { + nodeCount := len(cluster.Nodes) + nodeList := cluster.Nodes + + for _, portWithNodeFilters := range portsWithNodeFilters { + log.Tracef("inspecting port mapping for %s with nodefilters %s", portWithNodeFilters.Port, portWithNodeFilters.NodeFilters) + if len(portWithNodeFilters.NodeFilters) == 0 && nodeCount > 1 { + log.Infof("portmapping '%s' lacks a nodefilter, but there's more than one node: defaulting to %s", portWithNodeFilters.Port, types.DefaultTargetsNodefiltersPortMappings) + portWithNodeFilters.NodeFilters = types.DefaultTargetsNodefiltersPortMappings + } + + for _, f := range portWithNodeFilters.NodeFilters { + if strings.HasPrefix(f, "loadbalancer") { + log.Infof("portmapping '%s' targets the loadbalancer: defaulting to %s", portWithNodeFilters.Port, types.DefaultTargetsNodefiltersPortMappings) + portWithNodeFilters.NodeFilters = types.DefaultTargetsNodefiltersPortMappings + break + } + } + + filteredNodes, err := util.FilterNodesWithSuffix(nodeList, portWithNodeFilters.NodeFilters) + if err != nil { + return err + } + + for suffix, nodes := range filteredNodes { + portmappings, err := nat.ParsePortSpec(portWithNodeFilters.Port) + if err != nil { + return fmt.Errorf("error parsing port spec '%s': %+v", portWithNodeFilters.Port, err) + } + + if suffix == "proxy" || suffix == util.NodeFilterSuffixNone { // proxy is the default 
suffix for port mappings + if cluster.ServerLoadBalancer == nil { + return fmt.Errorf("port-mapping of type 'proxy' specified, but loadbalancer is disabled") + } + if err := addPortMappings(cluster.ServerLoadBalancer.Node, portmappings); err != nil { + return err + } + for _, pm := range portmappings { + if err := loadbalancerAddPortConfigs(cluster.ServerLoadBalancer, pm, nodes); err != nil { + return err + } + } + } else if suffix == "direct" { + if len(nodes) > 1 { + return fmt.Errorf("error: cannot apply a direct port-mapping (%s) to more than one node", portmappings) + } + for _, node := range nodes { + if err := addPortMappings(node, portmappings); err != nil { + return err + } + } + } else if suffix != util.NodeFilterMapKeyAll { + return fmt.Errorf("error adding port mappings: unknown suffix %s", suffix) + } + } + + } + + // print generated loadbalancer config + if log.GetLevel() >= log.DebugLevel { + yamlized, err := yaml.Marshal(cluster.ServerLoadBalancer.Config) + if err != nil { + log.Errorf("error printing loadbalancer config: %v", err) + } else { + log.Debugf("generated loadbalancer config:\n%s", string(yamlized)) + } + } + return nil +} + +func addPortMappings(node *k3d.Node, portmappings []nat.PortMapping) error { + + if node.Ports == nil { + node.Ports = nat.PortMap{} + } + for _, pm := range portmappings { + if _, exists := node.Ports[pm.Port]; exists { + node.Ports[pm.Port] = append(node.Ports[pm.Port], pm.Binding) + } else { + node.Ports[pm.Port] = []nat.PortBinding{pm.Binding} + } + } + return nil +} diff --git a/pkg/config/transform.go b/pkg/config/transform.go index b620a8f3..b8ba4b1c 100644 --- a/pkg/config/transform.go +++ b/pkg/config/transform.go @@ -32,7 +32,6 @@ import ( "github.com/docker/go-connections/nat" cliutil "github.com/rancher/k3d/v4/cmd/util" // TODO: move parseapiport to pkg "github.com/rancher/k3d/v4/pkg/client" - "github.com/rancher/k3d/v4/pkg/config/types" conf "github.com/rancher/k3d/v4/pkg/config/v1alpha3" 
"github.com/rancher/k3d/v4/pkg/runtimes" k3d "github.com/rancher/k3d/v4/pkg/types" @@ -175,7 +174,7 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim } // -> PORTS - if err := TransformPorts(ctx, runtime, &newCluster, simpleConfig.Ports); err != nil { + if err := client.TransformPorts(ctx, runtime, &newCluster, simpleConfig.Ports); err != nil { return nil, err } @@ -347,117 +346,3 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim return clusterConfig, nil } - -func addPortMappings(node *k3d.Node, portmappings []nat.PortMapping) error { - - if node.Ports == nil { - node.Ports = nat.PortMap{} - } - for _, pm := range portmappings { - if _, exists := node.Ports[pm.Port]; exists { - node.Ports[pm.Port] = append(node.Ports[pm.Port], pm.Binding) - } else { - node.Ports[pm.Port] = []nat.PortBinding{pm.Binding} - } - } - return nil -} - -func loadbalancerAddPortConfigs(loadbalancer *k3d.Loadbalancer, portmapping nat.PortMapping, targetNodes []*k3d.Node) error { - portconfig := fmt.Sprintf("%s.%s", portmapping.Port.Port(), portmapping.Port.Proto()) - nodenames := []string{} - for _, node := range targetNodes { - if node.Role == k3d.LoadBalancerRole { - return fmt.Errorf("error adding port config to loadbalancer: cannot add port config referencing the loadbalancer itself (loop)") - } - nodenames = append(nodenames, node.Name) - } - - // entry for that port doesn't exist yet, so we simply create it with the list of node names - if _, ok := loadbalancer.Config.Ports[portconfig]; !ok { - loadbalancer.Config.Ports[portconfig] = nodenames - return nil - } - -nodenameLoop: - for _, nodename := range nodenames { - for _, existingNames := range loadbalancer.Config.Ports[portconfig] { - if nodename == existingNames { - continue nodenameLoop - } - loadbalancer.Config.Ports[portconfig] = append(loadbalancer.Config.Ports[portconfig], nodename) - } - } - - return nil -} - -func TransformPorts(ctx context.Context, runtime 
runtimes.Runtime, cluster *k3d.Cluster, portsWithNodeFilters []conf.PortWithNodeFilters) error { - nodeCount := len(cluster.Nodes) - nodeList := cluster.Nodes - - for _, portWithNodeFilters := range portsWithNodeFilters { - log.Tracef("inspecting port mapping for %s with nodefilters %s", portWithNodeFilters.Port, portWithNodeFilters.NodeFilters) - if len(portWithNodeFilters.NodeFilters) == 0 && nodeCount > 1 { - log.Infof("portmapping '%s' lacks a nodefilter, but there's more than one node: defaulting to %s", portWithNodeFilters.Port, types.DefaultTargetsNodefiltersPortMappings) - portWithNodeFilters.NodeFilters = types.DefaultTargetsNodefiltersPortMappings - } - - for _, f := range portWithNodeFilters.NodeFilters { - if strings.HasPrefix(f, "loadbalancer") { - log.Infof("portmapping '%s' targets the loadbalancer: defaulting to %s", portWithNodeFilters.Port, types.DefaultTargetsNodefiltersPortMappings) - portWithNodeFilters.NodeFilters = types.DefaultTargetsNodefiltersPortMappings - break - } - } - - filteredNodes, err := util.FilterNodesWithSuffix(nodeList, portWithNodeFilters.NodeFilters) - if err != nil { - return err - } - - for suffix, nodes := range filteredNodes { - portmappings, err := nat.ParsePortSpec(portWithNodeFilters.Port) - if err != nil { - return fmt.Errorf("error parsing port spec '%s': %+v", portWithNodeFilters.Port, err) - } - - if suffix == "proxy" || suffix == util.NodeFilterSuffixNone { // proxy is the default suffix for port mappings - if cluster.ServerLoadBalancer == nil { - return fmt.Errorf("port-mapping of type 'proxy' specified, but loadbalancer is disabled") - } - if err := addPortMappings(cluster.ServerLoadBalancer.Node, portmappings); err != nil { - return err - } - for _, pm := range portmappings { - if err := loadbalancerAddPortConfigs(cluster.ServerLoadBalancer, pm, nodes); err != nil { - return err - } - } - } else if suffix == "direct" { - if len(nodes) > 1 { - return fmt.Errorf("error: cannot apply a direct port-mapping (%s) to 
more than one node", portmappings) - } - for _, node := range nodes { - if err := addPortMappings(node, portmappings); err != nil { - return err - } - } - } else if suffix != util.NodeFilterMapKeyAll { - return fmt.Errorf("error adding port mappings: unknown suffix %s", suffix) - } - } - - } - - // print generated loadbalancer config - if log.GetLevel() >= log.DebugLevel { - yamlized, err := yaml.Marshal(cluster.ServerLoadBalancer.Config) - if err != nil { - log.Errorf("error printing loadbalancer config: %v", err) - } else { - log.Debugf("generated loadbalancer config:\n%s", string(yamlized)) - } - } - return nil -}