[POC] lease lock per IP pool
This is POC-level code that changes whereabouts' lease/leaderElection
usage from a single global lease to a per-pool lease.

Signed-off-by: adrianc <[email protected]>
adrianchiris committed Mar 29, 2023
1 parent 6a7b22c commit 1191229
Showing 1 changed file with 40 additions and 13 deletions.
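
For orientation, here is a minimal, self-contained sketch of what a per-pool lease lock looks like with client-go's leader election, mirroring the leaseLockName helper and resourcelock.LeaseLock changes in the diff below. The simplified PoolIdentifier/ipPoolName placeholders, the runPerPoolElection wrapper, and the hard-coded timings are illustrative stand-ins, not whereabouts' actual definitions.

package leasesketch

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
)

// PoolIdentifier and ipPoolName are simplified stand-ins for the whereabouts
// types of the same purpose; the real definitions live in pkg/storage/kubernetes.
type PoolIdentifier struct {
	IpRange     string
	NetworkName string
}

func ipPoolName(pi PoolIdentifier) string {
	// The real IPPoolName also normalizes the range and special-cases the
	// default network; this placeholder only illustrates the shape of the name.
	return fmt.Sprintf("%s-%s", pi.NetworkName, pi.IpRange)
}

// leaseLockName mirrors the helper added in this commit: one Lease object per
// IP pool instead of a single global "whereabouts" lease.
func leaseLockName(pi PoolIdentifier) string {
	return fmt.Sprintf("whereabouts-%s", ipPoolName(pi))
}

// runPerPoolElection acquires the per-pool lease and runs onLeader while it
// holds it. The timings are example values, not whereabouts' configuration.
func runPerPoolElection(ctx context.Context, clientset kubernetes.Interface, namespace, id string, pi PoolIdentifier, onLeader func(context.Context)) error {
	rl := &resourcelock.LeaseLock{
		LeaseMeta: metav1.ObjectMeta{
			Name:      leaseLockName(pi), // e.g. "whereabouts-<pool name>"
			Namespace: namespace,
		},
		Client:     clientset.CoordinationV1(),
		LockConfig: resourcelock.ResourceLockConfig{Identity: id},
	}

	le, err := leaderelection.NewLeaderElector(leaderelection.LeaderElectionConfig{
		Lock:            rl,
		LeaseDuration:   1500 * time.Millisecond,
		RenewDeadline:   1000 * time.Millisecond,
		RetryPeriod:     500 * time.Millisecond,
		ReleaseOnCancel: true,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: onLeader,
			OnStoppedLeading: func() {},
		},
	})
	if err != nil {
		return err
	}
	le.Run(ctx) // blocks until ctx is cancelled or leadership is lost
	return nil
}

A caller would still need something like the leader/deposed channels used in the diff below to gate its work on holding the lease.
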
53 changes: 40 additions & 13 deletions pkg/storage/kubernetes/ipam.go
@@ -143,6 +143,11 @@ func normalizeRange(ipRange string) string {
return normalized
}

// leaseLockName returns the name of the leaseLock to be used for leader election
func leaseLockName(pi *PoolIdentifier) string {
return fmt.Sprintf("whereabouts-%s", IPPoolName(*pi))
}

func (i *KubernetesIPAM) getPool(ctx context.Context, name string, iprange string) (*whereaboutsv1alpha1.IPPool, error) {
ctxWithTimeout, cancel := context.WithTimeout(ctx, storage.RequestTimeout)
defer cancel()
@@ -252,6 +257,7 @@ func (c *KubernetesOverlappingRangeStore) UpdateOverlappingRangeAllocation(ctx c
case whereaboutstypes.Deallocate:
verb = "deallocate"
err = c.client.WhereaboutsV1alpha1().OverlappingRangeIPReservations(c.namespace).Delete(ctx, clusteripres.GetName(), metav1.DeleteOptions{})
// TODO(adrianc): if the object does not exist, should we still return an error?
}

if err != nil {
@@ -330,7 +336,7 @@ func (p *KubernetesIPPool) Update(ctx context.Context, reservations []whereabout

// newLeaderElector creates a new leaderelection.LeaderElector and associated
// channels by which to observe elections and depositions.
func newLeaderElector(clientset kubernetes.Interface, namespace string, podNamespace string, podID string, leaseDuration int, renewDeadline int, retryPeriod int) (*leaderelection.LeaderElector, chan struct{}, chan struct{}) {
func newLeaderElector(clientset kubernetes.Interface, namespace string, podNamespace string, podID string, leaseDuration int, renewDeadline int, retryPeriod int, leaseLockName string) (*leaderelection.LeaderElector, chan struct{}, chan struct{}) {
//log.WithField("context", "leaderelection")
// leaderOK will block gRPC startup until it's closed.
leaderOK := make(chan struct{})
@@ -340,7 +346,7 @@ func newLeaderElector(clientset kubernetes.Interface, namespace string, podNames

var rl = &resourcelock.LeaseLock{
LeaseMeta: metav1.ObjectMeta{
Name: "whereabouts",
Name: leaseLockName,
Namespace: namespace,
},
Client: clientset.CoordinationV1(),
@@ -386,7 +392,11 @@ func IPManagement(ctx context.Context, mode int, ipamConf whereaboutstypes.IPAMC
}

// setup leader election
le, leader, deposed := newLeaderElector(client.clientSet, client.namespace, ipamConf.PodNamespace, ipamConf.PodName, ipamConf.LeaderLeaseDuration, ipamConf.LeaderRenewDeadline, ipamConf.LeaderRetryPeriod)

// TODO(adrianc): leader election should be moved into IPManagementKubernetesUpdate to handle multiple ranges; for now, assume only one range.
perPoolLeaseLockName := leaseLockName(&PoolIdentifier{IpRange: ipamConf.IPRanges[0].Range, NetworkName: ipamConf.NetworkName})
le, leader, deposed := newLeaderElector(client.clientSet,
client.namespace, ipamConf.PodNamespace, ipamConf.PodName, ipamConf.LeaderLeaseDuration, ipamConf.LeaderRenewDeadline, ipamConf.LeaderRetryPeriod, perPoolLeaseLockName)
var wg sync.WaitGroup
wg.Add(2)

@@ -469,7 +479,6 @@ func IPManagementKubernetesUpdate(ctx context.Context, mode int, ipam *Kubernete

// handle the ip add/del until successful
var overlappingrangeallocations []whereaboutstypes.IPReservation
var ipforoverlappingrangeupdate net.IP
for _, ipRange := range ipamConf.IPRanges {
RETRYLOOP:
for j := 0; j < storage.DatastoreRetries; j++ {
@@ -522,15 +531,40 @@ func IPManagementKubernetesUpdate(ctx context.Context, mode int, ipam *Kubernete
continue
}

ipforoverlappingrangeupdate = newip.IP
// reserve IP in overlappingIPRangeAllocation
err = overlappingrangestore.UpdateOverlappingRangeAllocation(requestCtx, mode, newip.IP, containerID, podRef)
// TODO: We should probably return a specific error type when k8s returns AlreadyExists, and exit early on any other error
if err != nil {
logging.Errorf("Failed to update overlappingRangeAllocation (%v). Continuing loop. %v", err)
continue
}

defer func() {
if err != nil {
cleanupErr := overlappingrangestore.UpdateOverlappingRangeAllocation(requestCtx, mode, newip.IP, containerID, podRef)
if cleanupErr != nil {
logging.Errorf("failed to cleanup overlappingRangeAllocation: %v", cleanupErr)
}
}
}()
}

case whereaboutstypes.Deallocate:
var ipforoverlappingrangeupdate net.IP

updatedreservelist, ipforoverlappingrangeupdate, err = allocate.DeallocateIP(reservelist, containerID)
if err != nil {
logging.Errorf("Error deallocating IP: %v", err)
return newips, err
}

if ipamConf.OverlappingRanges {
err = overlappingrangestore.UpdateOverlappingRangeAllocation(requestCtx, mode, ipforoverlappingrangeupdate, containerID, podRef)
if err != nil {
logging.Errorf("Error performing UpdateOverlappingRangeAllocation: %v", err)
continue
}
}
}

// Clean out any dummy records from the reservelist...
@@ -549,6 +583,7 @@ func IPManagementKubernetesUpdate(ctx context.Context, mode int, ipam *Kubernete
err = pool.Update(requestCtx, usereservelist)
if err != nil {
logging.Errorf("IPAM error updating pool (attempt: %d): %v", j, err)

if e, ok := err.(storage.Temporary); ok && e.Temporary() {
continue
}
@@ -557,14 +592,6 @@ func IPManagementKubernetesUpdate(ctx context.Context, mode int, ipam *Kubernete
break RETRYLOOP
}

if ipamConf.OverlappingRanges {
err = overlappingrangestore.UpdateOverlappingRangeAllocation(requestCtx, mode, ipforoverlappingrangeupdate, containerID, podRef)
if err != nil {
logging.Errorf("Error performing UpdateOverlappingRangeAllocation: %v", err)
return newips, err
}
}

newips = append(newips, newip)
}
return newips, err
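
The two TODOs in this diff ask how the overlapping-range update should treat a missing reservation on deallocate and an AlreadyExists conflict on allocate. One possible answer, sketched with the standard apimachinery error helpers; the ignorableUpdateError helper and its allocating flag are hypothetical and not part of whereabouts.

package leasesketch

import (
	apierrors "k8s.io/apimachinery/pkg/api/errors"
)

// ignorableUpdateError sketches a policy for the TODOs above: when updating
// the overlapping-range reservation, AlreadyExists on allocate means another
// node won the race for this IP (the caller can retry with a different one),
// and NotFound on deallocate means the reservation is already gone (nothing
// left to clean up). Everything else is surfaced to the caller.
func ignorableUpdateError(err error, allocating bool) bool {
	if err == nil {
		return true
	}
	if allocating {
		return apierrors.IsAlreadyExists(err)
	}
	return apierrors.IsNotFound(err)
}

In the retry loop above, a true result could map to the existing continue, and a false result to returning the error to the caller.
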