chore: pkg imported more than once (#1313)

pull/1298/head
guangwu 12 months ago committed by GitHub
parent 2b0bbc978c
commit d36cc4989c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
  1. 3
      cmd/cluster/clusterCreate.go
  2. 3
      cmd/cluster/clusterEdit.go
  3. 3
      cmd/cluster/clusterStart.go
  4. 10
      pkg/client/cluster.go
  5. 11
      pkg/client/loadbalancer.go
  6. 9
      pkg/config/config.go

@@ -36,7 +36,6 @@ import (
"inet.af/netaddr"
"sigs.k8s.io/yaml"
"github.com/k3d-io/k3d/v5/cmd/util"
cliutil "github.com/k3d-io/k3d/v5/cmd/util"
cliconfig "github.com/k3d-io/k3d/v5/cmd/util/config"
k3dCluster "github.com/k3d-io/k3d/v5/pkg/client"
@@ -512,7 +511,7 @@ func applyCLIOverrides(cfg conf.SimpleConfig) (conf.SimpleConfig, error) {
l.Log().Tracef("RuntimeLabelFilterMap: %+v", runtimeLabelFilterMap)
for _, ulimit := range ppViper.GetStringSlice("cli.runtime-ulimits") {
cfg.Options.Runtime.Ulimits = append(cfg.Options.Runtime.Ulimits, *util.ParseRuntimeUlimit[conf.Ulimit](ulimit))
cfg.Options.Runtime.Ulimits = append(cfg.Options.Runtime.Ulimits, *cliutil.ParseRuntimeUlimit[conf.Ulimit](ulimit))
}
// --env

@@ -22,7 +22,6 @@ THE SOFTWARE.
package cluster
import (
"github.com/k3d-io/k3d/v5/cmd/util"
cliutil "github.com/k3d-io/k3d/v5/cmd/util"
"github.com/k3d-io/k3d/v5/pkg/client"
conf "github.com/k3d-io/k3d/v5/pkg/config/v1alpha5"
@@ -41,7 +40,7 @@ func NewCmdClusterEdit() *cobra.Command {
Long: `[EXPERIMENTAL] Edit cluster(s).`,
Args: cobra.ExactArgs(1),
Aliases: []string{"update"},
ValidArgsFunction: util.ValidArgsAvailableClusters,
ValidArgsFunction: cliutil.ValidArgsAvailableClusters,
Run: func(cmd *cobra.Command, args []string) {
existingCluster, changeset := parseEditClusterCmd(cmd, args)

@@ -27,7 +27,6 @@ import (
"github.com/k3d-io/k3d/v5/cmd/util"
"github.com/k3d-io/k3d/v5/pkg/client"
"github.com/k3d-io/k3d/v5/pkg/runtimes"
"github.com/k3d-io/k3d/v5/pkg/types"
"github.com/spf13/cobra"
l "github.com/k3d-io/k3d/v5/pkg/logger"
@@ -36,7 +35,7 @@ import (
// NewCmdClusterStart returns a new cobra command
func NewCmdClusterStart() *cobra.Command {
startClusterOpts := types.ClusterStartOpts{
startClusterOpts := k3d.ClusterStartOpts{
Intent: k3d.IntentClusterStart,
}

@@ -46,10 +46,8 @@ import (
"github.com/k3d-io/k3d/v5/pkg/actions"
config "github.com/k3d-io/k3d/v5/pkg/config/v1alpha5"
l "github.com/k3d-io/k3d/v5/pkg/logger"
"github.com/k3d-io/k3d/v5/pkg/runtimes"
k3drt "github.com/k3d-io/k3d/v5/pkg/runtimes"
runtimeErr "github.com/k3d-io/k3d/v5/pkg/runtimes/errors"
"github.com/k3d-io/k3d/v5/pkg/types"
k3d "github.com/k3d-io/k3d/v5/pkg/types"
"github.com/k3d-io/k3d/v5/pkg/types/k3s"
"github.com/k3d-io/k3d/v5/pkg/util"
@@ -361,7 +359,7 @@ ClusterCreatOpts:
*/
if cluster.KubeAPI.Host == k3d.DefaultAPIHost && runtime == k3drt.Docker {
// If the runtime is docker, attempt to use the docker host
if runtime == runtimes.Docker {
if runtime == k3drt.Docker {
dockerHost := runtime.GetHost()
if dockerHost != "" {
dockerHost = strings.Split(dockerHost, ":")[0] // remove the port
@@ -844,7 +842,7 @@ func ClusterGet(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster
}
}
vols, err := runtime.GetVolumesByLabel(ctx, map[string]string{types.LabelClusterName: cluster.Name})
vols, err := runtime.GetVolumesByLabel(ctx, map[string]string{k3d.LabelClusterName: cluster.Name})
if err != nil {
return nil, err
}
@@ -871,7 +869,7 @@ func GenerateNodeName(cluster string, role k3d.Role, suffix int) string {
}
// ClusterStart starts a whole cluster (i.e. all nodes of the cluster)
func ClusterStart(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster, clusterStartOpts types.ClusterStartOpts) error {
func ClusterStart(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster, clusterStartOpts k3d.ClusterStartOpts) error {
l.Log().Infof("Starting cluster '%s'", cluster.Name)
if clusterStartOpts.Intent == "" {
@@ -920,7 +918,7 @@ func ClusterStart(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clust
if err := NodeStart(ctx, runtime, initNode, &k3d.NodeStartOpts{
Wait: true, // always wait for the init node
NodeHooks: clusterStartOpts.NodeHooks,
ReadyLogMessage: types.GetReadyLogMessage(initNode, clusterStartOpts.Intent), // initNode means, that we're using etcd -> this will need quorum, so "k3s is up and running" won't happen right now
ReadyLogMessage: k3d.GetReadyLogMessage(initNode, clusterStartOpts.Intent), // initNode means, that we're using etcd -> this will need quorum, so "k3s is up and running" won't happen right now
EnvironmentInfo: clusterStartOpts.EnvironmentInfo,
}); err != nil {
return fmt.Errorf("Failed to start initializing server node: %+v", err)

@@ -38,7 +38,6 @@ import (
l "github.com/k3d-io/k3d/v5/pkg/logger"
"github.com/k3d-io/k3d/v5/pkg/runtimes"
"github.com/k3d-io/k3d/v5/pkg/types"
k3d "github.com/k3d-io/k3d/v5/pkg/types"
)
@@ -111,13 +110,13 @@ func UpdateLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, clu
return nil
}
func GetLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.Cluster) (types.LoadbalancerConfig, error) {
func GetLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.Cluster) (k3d.LoadbalancerConfig, error) {
var cfg k3d.LoadbalancerConfig
if cluster.ServerLoadBalancer == nil || cluster.ServerLoadBalancer.Node == nil {
cluster.ServerLoadBalancer = &k3d.Loadbalancer{}
for _, node := range cluster.Nodes {
if node.Role == types.LoadBalancerRole {
if node.Role == k3d.LoadBalancerRole {
var err error
cluster.ServerLoadBalancer.Node, err = NodeGet(ctx, runtime, node)
if err != nil {
@@ -127,9 +126,9 @@ func GetLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, cluste
}
}
reader, err := runtime.ReadFromNode(ctx, types.DefaultLoadbalancerConfigPath, cluster.ServerLoadBalancer.Node)
reader, err := runtime.ReadFromNode(ctx, k3d.DefaultLoadbalancerConfigPath, cluster.ServerLoadBalancer.Node)
if err != nil {
return cfg, fmt.Errorf("runtime failed to read loadbalancer config '%s' from node '%s': %w", types.DefaultLoadbalancerConfigPath, cluster.ServerLoadBalancer.Node.Name, err)
return cfg, fmt.Errorf("runtime failed to read loadbalancer config '%s' from node '%s': %w", k3d.DefaultLoadbalancerConfigPath, cluster.ServerLoadBalancer.Node.Name, err)
}
defer reader.Close()
@@ -176,7 +175,7 @@ func LoadbalancerGenerateConfig(cluster *k3d.Cluster) (k3d.LoadbalancerConfig, e
return lbConfig, nil
}
func LoadbalancerPrepare(ctx context.Context, runtime runtimes.Runtime, cluster *types.Cluster, opts *k3d.LoadbalancerCreateOpts) (*k3d.Node, error) {
func LoadbalancerPrepare(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.Cluster, opts *k3d.LoadbalancerCreateOpts) (*k3d.Node, error) {
labels := map[string]string{}
if opts != nil && opts.Labels == nil && len(opts.Labels) == 0 {

@@ -32,7 +32,6 @@ import (
"github.com/k3d-io/k3d/v5/pkg/config/v1alpha2"
"github.com/k3d-io/k3d/v5/pkg/config/v1alpha3"
"github.com/k3d-io/k3d/v5/pkg/config/v1alpha4"
"github.com/k3d-io/k3d/v5/pkg/config/v1alpha5"
defaultConfig "github.com/k3d-io/k3d/v5/pkg/config/v1alpha5"
types "github.com/k3d-io/k3d/v5/pkg/config/types"
@@ -44,7 +43,7 @@ var Schemas = map[string]string{
v1alpha2.ApiVersion: v1alpha2.JSONSchema,
v1alpha3.ApiVersion: v1alpha3.JSONSchema,
v1alpha4.ApiVersion: v1alpha4.JSONSchema,
v1alpha5.ApiVersion: v1alpha5.JSONSchema,
defaultConfig.ApiVersion: defaultConfig.JSONSchema,
}
func GetSchemaByVersion(apiVersion string) ([]byte, error) {
@@ -72,7 +71,7 @@ func FromViper(config *viper.Viper) (types.Config, error) {
case "k3d.io/v1alpha4":
cfg, err = v1alpha4.GetConfigByKind(kind)
case "k3d.io/v1alpha5":
cfg, err = v1alpha5.GetConfigByKind(kind)
cfg, err = defaultConfig.GetConfigByKind(kind)
case "":
cfg, err = defaultConfig.GetConfigByKind(kind)
default:
@@ -96,8 +95,8 @@ func getMigrations(version string) map[string]func(types.Config) (types.Config,
return v1alpha3.Migrations
case v1alpha4.ApiVersion:
return v1alpha4.Migrations
case v1alpha5.ApiVersion:
return v1alpha5.Migrations
case defaultConfig.ApiVersion:
return defaultConfig.Migrations
default:
return nil
}

Loading…
Cancel
Save