// A retry-count override was provided via the K3D_DEBUG_COREDNS_RETRIES env
// var: log and parse it, rejecting non-integer values explicitly instead of
// silently falling back to the default.
l.Log().Debugf("Running with %s=%s", k3d.K3dEnvDebugCorednsRetries, v)
r, err := strconv.Atoi(v)
if err != nil {
	return fmt.Errorf("Invalid value set for env var %s (%s): %w", k3d.K3dEnvDebugCorednsRetries, v, err)
}
retries = r
}
// select any server node
varnode*k3d.Node
for_,n:=rangecluster.Nodes{
ifn.Role==k3d.ServerRole{
node=n
}
}
// The hosts-file style record ("<ip> <name>") to inject into the NodeHosts
// key of the coredns ConfigMap.
hostsEntry := fmt.Sprintf("%s %s", ip, name)
// Shell one-liner run inside the node: read NodeHosts from the coredns
// ConfigMap, drop any existing line ending in `name`, append the new entry,
// then patch the ConfigMap with the result (newlines are tunneled through '^'
// so printf can build the JSON patch payload, and converted back afterwards).
patchCmd := `patch=$(kubectl get cm coredns -n kube-system --template='{{.data.NodeHosts}}' | sed -n -E -e '/[0-9\.]{4,12}\s` + name + `$/!p' -e '$a` + hostsEntry + `' | tr '\n' '^' | busybox xargs -0 printf '{"data": {"NodeHosts":"%s"}}'| sed -E 's%\^%\\n%g') && kubectl patch cm coredns -n kube-system -p="$patch"`
// Whether any retry attempt succeeded in patching the CoreDNS ConfigMap.
successInjectCoreDNSEntry:=false
// try 3 (or K3D_DEBUG_COREDNS_RETRIES value) times, as e.g. on cluster startup it may take some time for the Configmap to be available and the server to be responsive
// NOTE(review): this chunk appears truncated as well as whitespace-mangled:
// `err` and `logreader` below are used without any visible declaration, and
// the braces in this range close one more scope than they open. The exec call
// that actually runs patchCmd in `node` (and the success branch that sets
// successInjectCoreDNSEntry and breaks out of the loop) is presumably missing
// between the Debugf below and the `msg:=` line — confirm against the full file.
fori:=0;i<retries;i++{
l.Log().Debugf("Running CoreDNS patch in node %s to add %s (try %d/%d)...",node.Name,hostsEntry,i,retries)
// Build a diagnostic message describing this failed attempt.
msg:=fmt.Sprintf("(try %d/%d) error patching the CoreDNS ConfigMap to include entry '%s': %+v",i,retries,hostsEntry,err)
iflogreader!=nil{
// Attach the exec process output to the diagnostic message, if readable.
readlogs,err:=io.ReadAll(logreader)
iferr!=nil{
l.Log().Debugf("(try %d/%d) error reading the logs from failed CoreDNS patch exec process in node %s: %v",i,retries,node.Name,err)
}else{
msg+=fmt.Sprintf("\nLogs: %s",string(readlogs))
}
}else{
l.Log().Debugf("(try %d/%d) error reading the logs from failed CoreDNS patch exec process in node %s: no logreader returned for exec process",i,retries,node.Name)
}
// Failures are only logged at debug level; the hard error is raised after
// the loop if no attempt succeeded.
l.Log().Debugln(msg)
// Back off briefly before the next attempt.
time.Sleep(1*time.Second)
}
}
// Surface a hard error if no attempt managed to patch the ConfigMap; the
// per-attempt details were only emitted to the debug log.
if !successInjectCoreDNSEntry {
	return fmt.Errorf("failed to patch CoreDNS ConfigMap to include entry '%s' (%d tries, see debug logs)", hostsEntry, retries)
}
l.Log().Debugf("Successfully patched CoreDNS Configmap with record '%s'", hostsEntry)