feat: switch to informer
internal/informer.go (new file, 97 lines)
@@ -0,0 +1,97 @@
package internal

import (
	"fmt"
	"strings"
	"time"

	log "github.com/sirupsen/logrus"
	v1 "k8s.io/api/coordination/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/tools/cache"
)

// StartLeaseInformer starts a long-running informer watching Lease objects in the kube-system namespace.
// It will react to Add/Update events for leases whose name starts with "cilium-l2announce" and label
// the corresponding node.
func StartLeaseInformer(cfg *Config, stopCh <-chan struct{}) error {
	log.WithFields(log.Fields{"Caller": "StartLeaseInformer"}).Info("Starting lease informer")

	client, err := generateClient()
	if err != nil {
		return fmt.Errorf("could not generate client for informer: %s", err.Error())
	}

	// Resync period 0 disables periodic resyncs; WithNamespace restricts the watch to kube-system.
	factory := informers.NewSharedInformerFactoryWithOptions(&client, 0, informers.WithNamespace("kube-system"))
	leaseInformer := factory.Coordination().V1().Leases().Informer()

	leaseInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			handleLease(obj, cfg)
		},
		UpdateFunc: func(oldObj, newObj interface{}) {
			handleLease(newObj, cfg)
		},
		DeleteFunc: func(obj interface{}) {
			// nothing to do on delete for now
		},
	})

	factory.Start(stopCh)

	// Wait for the cache sync with a timeout. A false result means the stop channel
	// closed before the sync completed, which is surfaced as an error rather than
	// treated as success. The channel is buffered so the goroutine never leaks if
	// the timeout fires first.
	synced := make(chan bool, 1)
	go func() {
		synced <- cache.WaitForCacheSync(stopCh, leaseInformer.HasSynced)
	}()

	select {
	case ok := <-synced:
		if !ok {
			return fmt.Errorf("informer cache sync aborted before completion")
		}
	case <-time.After(15 * time.Second):
		return fmt.Errorf("timed out waiting for informer cache sync")
	}

	log.WithFields(log.Fields{"Caller": "StartLeaseInformer"}).Info("Lease informer running")
	return nil
}

func handleLease(obj interface{}, cfg *Config) {
	lease, ok := obj.(*v1.Lease)
	if !ok {
		log.WithFields(log.Fields{"Caller": "handleLease"}).Warn("Could not cast object to Lease")
		return
	}

	if lease == nil {
		return
	}

	if lease.Name == "" {
		return
	}

	if !strings.HasPrefix(lease.Name, "cilium-l2announce") {
		return
	}

	node, err := GetNodeNameFromLease(*lease)
	if err != nil {
		log.WithFields(log.Fields{"Caller": "handleLease", "Lease": lease.Name}).Errorf("Error parsing lease: %s", err.Error())
		return
	}

	if cfg.DryRun {
		log.WithFields(log.Fields{"Caller": "handleLease"}).Infof("Dry run: would label node %s with %s=true (lease %s)", node, cfg.CiliumLabel, lease.Name)
		return
	}

	err = LabelNode(node, lease.Name, cfg)
	if err != nil {
		log.WithFields(log.Fields{"Caller": "handleLease", "Node": node, "Lease": lease.Name}).Errorf("Error labeling node: %s", err.Error())
	}
}
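
For context, a minimal wiring sketch (not part of this commit) of how StartLeaseInformer could be driven by a caller, with the stop channel tied to process signals. The RunLeaseInformer wrapper below is illustrative only and assumes it lives in the same internal package:

package internal

import (
	"os"
	"os/signal"
	"syscall"
)

// RunLeaseInformer is an illustrative wrapper (not part of this commit) showing
// how StartLeaseInformer could be tied to SIGINT/SIGTERM for shutdown.
func RunLeaseInformer(cfg *Config) error {
	stopCh := make(chan struct{})

	// Close stopCh on SIGINT/SIGTERM so the informer's watches shut down cleanly.
	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM)
	go func() {
		<-sigCh
		close(stopCh)
	}()

	if err := StartLeaseInformer(cfg, stopCh); err != nil {
		return err
	}

	// StartLeaseInformer returns once the cache has synced; block here until shutdown.
	<-stopCh
	return nil
}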

@@ -92,36 +92,36 @@ func RemoveLabelFromNode(nodeName string) error {
	return nil
}

func GetCiliumL2Leases() ([]v1.Lease, error) {
	client, err := generateClient()
	if err != nil {
		log.WithFields(log.Fields{"Caller": "GetCiliumL2Leases"}).Errorf("Could not generate client: %s", err.Error())
		return nil, err
	}
// func GetCiliumL2Leases() ([]v1.Lease, error) {
// client, err := generateClient()
// if err != nil {
// log.WithFields(log.Fields{"Caller": "GetCiliumL2Leases"}).Errorf("Could not generate client: %s", err.Error())
// return nil, err
// }

	leases, err := client.CoordinationV1().Leases("kube-system").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		log.WithFields(log.Fields{"Caller": "GetCiliumL2Leases"}).Errorf("Could not list leases: %s", err.Error())
		return nil, err
	}
// leases, err := client.CoordinationV1().Leases("kube-system").List(context.TODO(), metav1.ListOptions{})
// if err != nil {
// log.WithFields(log.Fields{"Caller": "GetCiliumL2Leases"}).Errorf("Could not list leases: %s", err.Error())
// return nil, err
// }

	ciliumLeases := []v1.Lease{}
// ciliumLeases := []v1.Lease{}

	for _, l := range leases.Items {
		if strings.HasPrefix(l.Name, "cilium-l2announce") {
			log.WithFields(log.Fields{
				"Caller": "GetCiliumL2Leases",
				"Lease":  l.Name,
			}).Info("Found Cilium L2 lease")
			// Pretty-print a couple of fields for debugging
			if l.Spec.HolderIdentity != nil {
				log.WithFields(log.Fields{"HolderIdentity": *l.Spec.HolderIdentity}).Debug("HolderIdentity")
			}
			ciliumLeases = append(ciliumLeases, l)
		}
	}
	return ciliumLeases, nil
}
// for _, l := range leases.Items {
// if strings.HasPrefix(l.Name, "cilium-l2announce") {
// log.WithFields(log.Fields{
// "Caller": "GetCiliumL2Leases",
// "Lease": l.Name,
// }).Info("Found Cilium L2 lease")
// // Pretty-print a couple of fields for debugging
// if l.Spec.HolderIdentity != nil {
// log.WithFields(log.Fields{"HolderIdentity": *l.Spec.HolderIdentity}).Debug("HolderIdentity")
// }
// ciliumLeases = append(ciliumLeases, l)
// }
// }
// return ciliumLeases, nil
// }

func GetNodeNameFromLease(lease v1.Lease) (string, error) {
	if lease.Spec.HolderIdentity == nil {
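
Since the one-shot List in GetCiliumL2Leases is being retired in favor of the informer, a point-in-time snapshot of the cilium-l2announce leases can still be taken from the informer's local cache via its lister. A minimal sketch under that assumption; the listCiliumL2Leases helper below is illustrative and not part of this commit:

package internal

import (
	"strings"

	v1 "k8s.io/api/coordination/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
)

// listCiliumL2Leases reads cilium-l2announce leases from the informer's local
// cache instead of issuing a fresh List call to the API server.
// Hypothetical helper; assumes the same kube-system-scoped SharedInformerFactory
// created in StartLeaseInformer is passed in.
func listCiliumL2Leases(factory informers.SharedInformerFactory) ([]*v1.Lease, error) {
	lister := factory.Coordination().V1().Leases().Lister()
	all, err := lister.Leases("kube-system").List(labels.Everything())
	if err != nil {
		return nil, err
	}

	ciliumLeases := []*v1.Lease{}
	for _, l := range all {
		if strings.HasPrefix(l.Name, "cilium-l2announce") {
			ciliumLeases = append(ciliumLeases, l)
		}
	}
	return ciliumLeases, nil
}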