diff --git a/docs/usage/guides/calico.md b/docs/usage/guides/calico.md
index 1a8a8a16..fd1c71eb 100644
--- a/docs/usage/guides/calico.md
+++ b/docs/usage/guides/calico.md
@@ -34,7 +34,7 @@ For watching for the pod(s) deployment
 watch "kubectl get pods -n kube-system"
 ```
 
-You will have something like this at begining (with the command line `kubectl get pods -n kube-system`)
+You will have something like this at beginning (with the command line `kubectl get pods -n kube-system`)
 ```
 NAME                                     READY   STATUS    RESTARTS   AGE
 helm-install-traefik-pn84f               0/1     Pending   0          3s
diff --git a/docs/usage/guides/calico.yaml b/docs/usage/guides/calico.yaml
index b077877e..9de2ffd0 100644
--- a/docs/usage/guides/calico.yaml
+++ b/docs/usage/guides/calico.yaml
@@ -2092,7 +2092,7 @@ spec:
                 type: object
               ipipMode:
                 description: Contains configuration for IPIP tunneling for this pool.
-                  If not specified, then this is defaulted to "Never" (i.e. IPIP tunelling
+                  If not specified, then this is defaulted to "Never" (i.e. IPIP tunneling
                   is disabled).
                 type: string
               nat-outgoing:
@@ -2112,7 +2112,7 @@ spec:
               vxlanMode:
                 description: Contains configuration for VXLAN tunneling for this pool.
                   If not specified, then this is defaulted to "Never" (i.e. VXLAN
-                  tunelling is disabled).
+                  tunneling is disabled).
                 type: string
             required:
             - cidr
diff --git a/docs/usage/guides/cuda.md b/docs/usage/guides/cuda.md
index 11135c9a..efb7320d 100644
--- a/docs/usage/guides/cuda.md
+++ b/docs/usage/guides/cuda.md
@@ -116,7 +116,7 @@ We need to configure containerd to use the NVIDIA Container Runtime. We need to
 ```
 
 ### The NVIDIA device plugin
-To enable NVIDIA GPU support on Kubernetes you also need to install the [NVIDIA device plugin](https://github.com/NVIDIA/k8s-device-plugin). The device plugin is a deamonset and allows you to automatically:
+To enable NVIDIA GPU support on Kubernetes you also need to install the [NVIDIA device plugin](https://github.com/NVIDIA/k8s-device-plugin). The device plugin is a daemonset and allows you to automatically:
 * Expose the number of GPUs on each nodes of your cluster
 * Keep track of the health of your GPUs
 * Run GPU enabled containers in your Kubernetes cluster.
diff --git a/mkdocs.yml b/mkdocs.yml
index 5291f60b..75a3f185 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -30,7 +30,7 @@ theme:
   favicon: static/img/favicons_black_blue/favicon.png
 
 # Navigation
-# nav: ommitted, because we're using the awesome-pages plugin (https://squidfunk.github.io/mkdocs-material/plugins/awesome-pages/)
+# nav: omitted, because we're using the awesome-pages plugin (https://squidfunk.github.io/mkdocs-material/plugins/awesome-pages/)
 
 # Extensions
 markdown_extensions:
diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go
index 48a4c027..d06230e4 100644
--- a/pkg/cluster/cluster.go
+++ b/pkg/cluster/cluster.go
@@ -229,7 +229,7 @@ func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
 					logreader.Close()
 				}
 				log.Errorln(err)
-				log.Errorln("Failed to get logs from the initializig server node.. waiting for 3 seconds instead")
+				log.Errorln("Failed to get logs from the initializing server node.. waiting for 3 seconds instead")
 				time.Sleep(3 * time.Second)
 				break
 			}
@@ -278,7 +278,7 @@ func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
 			}
 		}
 
-		// asynchronously wait for this server node to be ready (by checking the logs for a specific log mesage)
+		// asynchronously wait for this server node to be ready (by checking the logs for a specific log message)
 		if node.Role == k3d.ServerRole && cluster.CreateClusterOpts.WaitForServer {
 			log.Debugf("Waiting for server node '%s' to get ready", node.Name)
 			if err := NodeWaitForLogMessage(clusterCreateCtx, runtime, node, k3d.ReadyLogMessageByRole[k3d.ServerRole], time.Time{}); err != nil {
@@ -598,7 +598,7 @@ func ClusterStart(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clust
 			continue
 		}
 
-		// asynchronously wait for this server node to be ready (by checking the logs for a specific log mesage)
+		// asynchronously wait for this server node to be ready (by checking the logs for a specific log message)
 		if node.Role == k3d.ServerRole && startClusterOpts.WaitForServer {
 			serverNode := node
 			waitForServerWaitgroup.Go(func() error {
diff --git a/pkg/tools/tools.go b/pkg/tools/tools.go
index 2f386de2..0cb67b49 100644
--- a/pkg/tools/tools.go
+++ b/pkg/tools/tools.go
@@ -46,7 +46,7 @@ func ImageImportIntoClusterMulti(ctx context.Context, runtime runtimes.Runtime,
 
 	runtimeImages, err := runtime.GetImages(ctx)
 	if err != nil {
-		log.Errorln("Failed to fetch list of exsiting images from runtime")
+		log.Errorln("Failed to fetch list of existing images from runtime")
 		return err
 	}