Skip to content

Commit 1ea8b3b

Browse files
committed
Fix a crash on node migration.
After an unsuccessful initial cluster sync it may happen that the cluster statefulset is empty. This has been made more likely since 88d6a7b, since it has introduced syncing volumes before statefulsets, and the volume sync may fail for different reasons (e.g. the volume has been shrunk, or too many calls to Amazon).
1 parent e09e62f commit 1ea8b3b

File tree

1 file changed

+9
-0
lines changed

1 file changed

+9
-0
lines changed

pkg/cluster/pod.go

+9
Original file line numberDiff line numberDiff line change
@@ -203,6 +203,15 @@ func (c *Cluster) MigrateMasterPod(podName spec.NamespacedName) error {
203203
c.logger.Warningf("pod %q is not a master", podName)
204204
return nil
205205
}
206+
// we must have a statefulset in the cluster for the migration to work
207+
if c.Statefulset == nil {
208+
sset, err := c.KubeClient.StatefulSets(c.Namespace).Get(c.statefulSetName(), metav1.GetOptions{})
209+
if err != nil {
210+
return fmt.Errorf("could not retrieve cluster statefulset: %v", err)
211+
}
212+
c.Statefulset = sset
213+
}
214+
// We may not have a cached statefulset if the initial cluster sync has aborted, revert to the spec in that case.
206215
if *c.Statefulset.Spec.Replicas == 1 {
207216
c.logger.Warningf("single master pod for cluster %q, migration will cause longer downtime of the master instance", c.clusterName())
208217
} else {

0 commit comments

Comments
 (0)