fix(): informer should now react to changes and not process everything every 2 seconds. Also corrected path in probes
Some checks failed
Lint and Test Charts / lint-test (push) Has been cancelled
Build and Test / Test (push) Failing after 21s
Build and Test / Build_Image_arm64 (push) Successful in 2m4s
Build and Test / Build_Image_amd64 (push) Has been cancelled

This commit is contained in:
2026-01-20 21:55:32 +01:00
parent 73afa9c9f7
commit 1da2e795c2
4 changed files with 11 additions and 29 deletions

View File

@@ -21,8 +21,7 @@ func StartLeaseInformer(cfg *Config, stopCh <-chan struct{}) error {
if err != nil {
return fmt.Errorf("Could not generate client for informer: %s", err.Error())
}
factory := informers.NewSharedInformerFactoryWithOptions(&client, 0, informers.WithNamespace("kube-system"))
factory := informers.NewSharedInformerFactoryWithOptions(client, 0, informers.WithNamespace("kube-system"))
leaseInformer := factory.Coordination().V1().Leases().Informer()
leaseInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{

View File

@@ -3,7 +3,6 @@ package internal
import (
"context"
"fmt"
"strings"
log "github.com/sirupsen/logrus"
v1 "k8s.io/api/coordination/v1"
@@ -12,19 +11,19 @@ import (
"k8s.io/client-go/rest"
)
func generateClient() (kubernetes.Clientset, error) {
func generateClient() (kubernetes.Interface, error) {
log.WithFields(log.Fields{
"Caller": "generateClient",
}).Info("Generating k8s client")
config, err := rest.InClusterConfig()
if err != nil {
return kubernetes.Clientset{}, fmt.Errorf("Could not generate in cluster config: %s", err.Error())
return nil, fmt.Errorf("Could not generate in cluster config: %s", err.Error())
}
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
return kubernetes.Clientset{}, fmt.Errorf("Could not generate clientset: %s", err.Error())
return nil, fmt.Errorf("Could not generate clientset: %s", err.Error())
}
return *clientset, nil
return clientset, nil
}
func LabelNode(nodeName string, leaseName string, cfg *Config) error {
@@ -33,7 +32,7 @@ func LabelNode(nodeName string, leaseName string, cfg *Config) error {
return fmt.Errorf("Could not generate client: %s", err.Error())
}
return ApplyLabelToNode(&client, nodeName, leaseName, cfg)
return ApplyLabelToNode(client, nodeName, leaseName, cfg)
}
@@ -100,10 +99,6 @@ func GetNodeNameFromLease(lease v1.Lease) (string, error) {
return "", fmt.Errorf("Lease %s has no HolderIdentity", lease.Name)
}
holderIdentity := *lease.Spec.HolderIdentity
parts := strings.Split(holderIdentity, "-")
if len(parts) < 3 {
return "", fmt.Errorf("Unexpected HolderIdentity format: %s", holderIdentity)
}
nodeName := strings.Join(parts[2:], "-")
return nodeName, nil
return holderIdentity, nil
}

View File

@@ -20,23 +20,11 @@ func TestGetNodeNameFromLease_Valid(t *testing.T) {
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if node != "node1" {
if node != "aa-bb-node1" {
t.Fatalf("unexpected node name: got %s", node)
}
}
func TestGetNodeNameFromLease_Invalid(t *testing.T) {
l := coordv1.Lease{}
l.Name = "cilium-l2announce-2"
hi := "too-short"
l.Spec.HolderIdentity = &hi
_, err := GetNodeNameFromLease(l)
if err == nil {
t.Fatalf("expected error for invalid holder identity")
}
}
func TestApplyLabelToNode_WithFakeClient(t *testing.T) {
client := fake.NewSimpleClientset()