fix(): informer now reacts to changes instead of reprocessing everything every 2 seconds; also corrected path in probes
@@ -64,11 +64,11 @@ resources: {}
 # This is to setup the liveness and readiness probes more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
 livenessProbe:
   httpGet:
-    path: /
+    path: /health
     port: http
 readinessProbe:
   httpGet:
-    path: /
+    path: /health
     port: http

 env:
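
Note: the corrected probe path only helps if the service actually serves it. A minimal sketch of such an endpoint, assuming the workload is a plain Go HTTP server (the handler and port below are illustrative, not taken from this repository):

package main

import "net/http"

func main() {
	// Hypothetical health endpoint matching the probe path above.
	// kubelet treats any 2xx/3xx response as a passing probe.
	http.HandleFunc("/health", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
		w.Write([]byte("ok"))
	})
	// `port: http` in the probe refers to the container port named "http";
	// 8080 is an assumption here.
	http.ListenAndServe(":8080", nil)
}
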
@@ -21,8 +21,7 @@ func StartLeaseInformer(cfg *Config, stopCh <-chan struct{}) error {
 	if err != nil {
 		return fmt.Errorf("Could not generate client for informer: %s", err.Error())
 	}
-
-	factory := informers.NewSharedInformerFactoryWithOptions(&client, 0, informers.WithNamespace("kube-system"))
+	factory := informers.NewSharedInformerFactoryWithOptions(client, 0, informers.WithNamespace("kube-system"))
 	leaseInformer := factory.Coordination().V1().Leases().Informer()

 	leaseInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
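
Note: the second argument to NewSharedInformerFactoryWithOptions is the resync period; 0 disables the periodic full-cache replay, so handlers run only when a lease actually changes, which is the point of this commit. A reduced sketch of that wiring (startLeaseWatch and the handler body are hypothetical simplifications, not the repository's StartLeaseInformer):

package internal

import (
	v1 "k8s.io/api/coordination/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// startLeaseWatch is a hypothetical, reduced version of StartLeaseInformer,
// shown only to illustrate the resync semantics.
func startLeaseWatch(client kubernetes.Interface, stopCh <-chan struct{}) {
	// Resync period 0: no periodic re-delivery of the cached leases,
	// so UpdateFunc fires only on real changes observed via watch.
	factory := informers.NewSharedInformerFactoryWithOptions(client, 0, informers.WithNamespace("kube-system"))
	leaseInformer := factory.Coordination().V1().Leases().Informer()
	leaseInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		UpdateFunc: func(oldObj, newObj interface{}) {
			oldLease, newLease := oldObj.(*v1.Lease), newObj.(*v1.Lease)
			// Even with resync disabled, comparing ResourceVersion is a
			// cheap guard against replayed no-op updates.
			if oldLease.ResourceVersion == newLease.ResourceVersion {
				return
			}
			// react to the changed lease here
		},
	})
	factory.Start(stopCh)
}
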
@@ -3,7 +3,6 @@ package internal
 import (
 	"context"
 	"fmt"
-	"strings"

 	log "github.com/sirupsen/logrus"
 	v1 "k8s.io/api/coordination/v1"

@@ -12,19 +11,19 @@ import (
 	"k8s.io/client-go/rest"
 )

-func generateClient() (kubernetes.Clientset, error) {
+func generateClient() (kubernetes.Interface, error) {
 	log.WithFields(log.Fields{
 		"Caller": "generateClient",
 	}).Info("Generating k8s client")
 	config, err := rest.InClusterConfig()
 	if err != nil {
-		return kubernetes.Clientset{}, fmt.Errorf("Could not generate in cluster config: %s", err.Error())
+		return nil, fmt.Errorf("Could not generate in cluster config: %s", err.Error())
 	}
 	clientset, err := kubernetes.NewForConfig(config)
 	if err != nil {
-		return kubernetes.Clientset{}, fmt.Errorf("Could not generate clientset: %s", err.Error())
+		return nil, fmt.Errorf("Could not generate clientset: %s", err.Error())
 	}
-	return *clientset, nil
+	return clientset, nil
 }

 func LabelNode(nodeName string, leaseName string, cfg *Config) error {

@@ -33,7 +32,7 @@ func LabelNode(nodeName string, leaseName string, cfg *Config) error {
 		return fmt.Errorf("Could not generate client: %s", err.Error())
 	}

-	return ApplyLabelToNode(&client, nodeName, leaseName, cfg)
+	return ApplyLabelToNode(client, nodeName, leaseName, cfg)

 }
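
Note: returning kubernetes.Interface instead of the concrete Clientset (and passing client rather than &client) is what lets production and test code share one path, since the fake clientset implements the same interface. A small sketch under that assumption (countNodes is a hypothetical helper, not part of this repo):

package internal

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// countNodes accepts the interface rather than *kubernetes.Clientset, so the
// in-cluster clientset and fake.NewSimpleClientset() are interchangeable.
func countNodes(client kubernetes.Interface) (int, error) {
	nodes, err := client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		return 0, err
	}
	return len(nodes.Items), nil
}
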
@@ -100,10 +99,6 @@ func GetNodeNameFromLease(lease v1.Lease) (string, error) {
 		return "", fmt.Errorf("Lease %s has no HolderIdentity", lease.Name)
 	}
 	holderIdentity := *lease.Spec.HolderIdentity
-	parts := strings.Split(holderIdentity, "-")
-	if len(parts) < 3 {
-		return "", fmt.Errorf("Unexpected HolderIdentity format: %s", holderIdentity)
-	}
-	nodeName := strings.Join(parts[2:], "-")
-	return nodeName, nil
+	return holderIdentity, nil
 }
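
Note: the removed parsing discarded the first two dash-separated segments of the holder identity, which mangles any identity that is itself a node name containing dashes; that is also why the test below now expects the full identity back. A standalone illustration of the two behaviors (values are hypothetical):

package main

import (
	"fmt"
	"strings"
)

func main() {
	holderIdentity := "aa-bb-node1"

	// Old behavior: drop the first two segments.
	parts := strings.Split(holderIdentity, "-") // ["aa", "bb", "node1"]
	fmt.Println(strings.Join(parts[2:], "-"))   // "node1"

	// New behavior: treat the holder identity as the node name.
	fmt.Println(holderIdentity) // "aa-bb-node1"
}
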
@@ -20,23 +20,11 @@ func TestGetNodeNameFromLease_Valid(t *testing.T) {
 	if err != nil {
 		t.Fatalf("unexpected error: %v", err)
 	}
-	if node != "node1" {
+	if node != "aa-bb-node1" {
 		t.Fatalf("unexpected node name: got %s", node)
 	}
 }

-func TestGetNodeNameFromLease_Invalid(t *testing.T) {
-	l := coordv1.Lease{}
-	l.Name = "cilium-l2announce-2"
-	hi := "too-short"
-	l.Spec.HolderIdentity = &hi
-
-	_, err := GetNodeNameFromLease(l)
-	if err == nil {
-		t.Fatalf("expected error for invalid holder identity")
-	}
-}
-
 func TestApplyLabelToNode_WithFakeClient(t *testing.T) {
 	client := fake.NewSimpleClientset()
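
Note: fake.NewSimpleClientset() is backed by an in-memory object tracker, so a test can seed objects and read mutations back through the normal clientset API. A sketch of that round trip (the node name and label key are made up; the real test body is not shown in this diff):

package internal

import (
	"context"
	"testing"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestFakeClientRoundTrip(t *testing.T) {
	// Seed the tracker with a node the code under test can label.
	client := fake.NewSimpleClientset(&corev1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: "node1"},
	})

	node, err := client.CoreV1().Nodes().Get(context.TODO(), "node1", metav1.GetOptions{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if node.Labels == nil {
		node.Labels = map[string]string{}
	}
	node.Labels["lease"] = "cilium-l2announce-2"
	if _, err := client.CoreV1().Nodes().Update(context.TODO(), node, metav1.UpdateOptions{}); err != nil {
		t.Fatalf("update failed: %v", err)
	}

	// The fake tracker persists the update, so a fresh Get sees the label.
	got, err := client.CoreV1().Nodes().Get(context.TODO(), "node1", metav1.GetOptions{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if got.Labels["lease"] != "cilium-l2announce-2" {
		t.Fatalf("label not persisted: %v", got.Labels)
	}
}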