Limit maximum number of pods to be evicted per node
ravisantoshgudimetla authored and ingvagabund committed Mar 22, 2018
1 parent 0a7f14d commit f1f8b2e
Showing 15 changed files with 150 additions and 43 deletions.
6 changes: 6 additions & 0 deletions Makefile
@@ -46,3 +46,9 @@ test-unit:

test-e2e:
./test/run-e2e-tests.sh

gen:
./hack/update-codecgen.sh
./hack/update-generated-conversions.sh
./hack/update-generated-deep-copies.sh
./hack/update-generated-defaulters.sh
2 changes: 2 additions & 0 deletions cmd/descheduler/app/options/options.go
@@ -55,4 +55,6 @@ func (rs *DeschedulerServer) AddFlags(fs *pflag.FlagSet) {
fs.BoolVar(&rs.DryRun, "dry-run", rs.DryRun, "execute descheduler in dry run mode.")
// node-selector query causes descheduler to run only on nodes that matches the node labels in the query
fs.StringVar(&rs.NodeSelector, "node-selector", rs.NodeSelector, "Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)")
// max-pods-to-evict-per-node limits the maximum number of pods to be evicted per node by descheduler.
fs.IntVar(&rs.MaxNoOfPodsToEvictPerNode, "max-pods-to-evict-per-node", rs.MaxNoOfPodsToEvictPerNode, "Limits the maximum number of pods to be evicted per node by descheduler")
}
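The new flag binds directly to DeschedulerServer.MaxNoOfPodsToEvictPerNode through pflag. As a quick illustration of the wiring (not part of this commit; deschedulerOptions below is a hypothetical stand-in for options.DeschedulerServer), the following sketch registers and parses the flag the same way and prints the resulting limit:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

// deschedulerOptions is a hypothetical stand-in for options.DeschedulerServer,
// used only to show how the flag binds to MaxNoOfPodsToEvictPerNode.
type deschedulerOptions struct {
	MaxNoOfPodsToEvictPerNode int
}

func main() {
	opts := &deschedulerOptions{}
	fs := pflag.NewFlagSet("descheduler", pflag.ExitOnError)
	fs.IntVar(&opts.MaxNoOfPodsToEvictPerNode, "max-pods-to-evict-per-node", opts.MaxNoOfPodsToEvictPerNode,
		"Limits the maximum number of pods to be evicted per node by descheduler")

	// Equivalent to invoking: descheduler --max-pods-to-evict-per-node=5
	if err := fs.Parse([]string{"--max-pods-to-evict-per-node=5"}); err != nil {
		panic(err)
	}
	fmt.Println(opts.MaxNoOfPodsToEvictPerNode) // prints 5
}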
3 changes: 3 additions & 0 deletions pkg/apis/componentconfig/types.go
@@ -42,4 +42,7 @@ type DeschedulerConfiguration struct {

// Node selectors
NodeSelector string

// MaxNoOfPodsToEvictPerNode restricts the maximum number of pods to be evicted per node.
MaxNoOfPodsToEvictPerNode int
}
3 changes: 3 additions & 0 deletions pkg/apis/componentconfig/v1alpha1/types.go
@@ -42,4 +42,7 @@ type DeschedulerConfiguration struct {

// Node selectors
NodeSelector string `json:"nodeSelector,omitempty"`

// MaxNoOfPodsToEvictPerNode restricts the maximum number of pods to be evicted per node.
MaxNoOfPodsToEvictPerNode int `json:"maxNoOfPodsToEvictPerNode,omitempty"`
}
9 changes: 5 additions & 4 deletions pkg/descheduler/descheduler.go
@@ -60,10 +60,11 @@ func Run(rs *options.DeschedulerServer) error {
return nil
}

strategies.RemoveDuplicatePods(rs, deschedulerPolicy.Strategies["RemoveDuplicates"], evictionPolicyGroupVersion, nodes)
strategies.LowNodeUtilization(rs, deschedulerPolicy.Strategies["LowNodeUtilization"], evictionPolicyGroupVersion, nodes)
strategies.RemovePodsViolatingInterPodAntiAffinity(rs, deschedulerPolicy.Strategies["RemovePodsViolatingInterPodAntiAffinity"], evictionPolicyGroupVersion, nodes)
strategies.RemovePodsViolatingNodeAffinity(rs, deschedulerPolicy.Strategies["RemovePodsViolatingNodeAffinity"], evictionPolicyGroupVersion, nodes)
nodePodCount := strategies.InitializeNodePodCount(nodes)
strategies.RemoveDuplicatePods(rs, deschedulerPolicy.Strategies["RemoveDuplicates"], evictionPolicyGroupVersion, nodes, nodePodCount)
strategies.LowNodeUtilization(rs, deschedulerPolicy.Strategies["LowNodeUtilization"], evictionPolicyGroupVersion, nodes, nodePodCount)
strategies.RemovePodsViolatingInterPodAntiAffinity(rs, deschedulerPolicy.Strategies["RemovePodsViolatingInterPodAntiAffinity"], evictionPolicyGroupVersion, nodes, nodePodCount)
strategies.RemovePodsViolatingNodeAffinity(rs, deschedulerPolicy.Strategies["RemovePodsViolatingNodeAffinity"], evictionPolicyGroupVersion, nodes, nodePodCount)

return nil
}
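Run now seeds a shared per-node eviction counter and threads it through every strategy. The type nodePodEvictedCount and the helper InitializeNodePodCount are defined in one of the 15 changed files that is not shown in this truncated view; based on how they are used above and in the strategies below, a minimal sketch would look like this (a reconstruction, not the literal file from the commit):

package strategies

import "k8s.io/api/core/v1"

// nodePodEvictedCount tracks how many pods have been evicted from each node
// during a single descheduler run, across all strategies.
type nodePodEvictedCount map[*v1.Node]int

// InitializeNodePodCount starts every node at zero so each strategy can
// compare the running total against MaxNoOfPodsToEvictPerNode.
func InitializeNodePodCount(nodes []*v1.Node) nodePodEvictedCount {
	nodePodCount := make(nodePodEvictedCount)
	for _, node := range nodes {
		nodePodCount[node] = 0
	}
	return nodePodCount
}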
15 changes: 10 additions & 5 deletions pkg/descheduler/strategies/duplicates.go
@@ -17,9 +17,10 @@ limitations under the License.
package strategies

import (
"github.com/golang/glog"
"strings"

"github.com/golang/glog"

"k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"

@@ -35,15 +36,15 @@ type DuplicatePodsMap map[string][]*v1.Pod
// RemoveDuplicatePods removes the duplicate pods on node. This strategy evicts all duplicate pods on node.
// A pod is said to be a duplicate of other if both of them are from same creator, kind and are within the same
// namespace. As of now, this strategy won't evict daemonsets, mirror pods, critical pods and pods with local storages.
func RemoveDuplicatePods(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, policyGroupVersion string, nodes []*v1.Node) {
func RemoveDuplicatePods(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, policyGroupVersion string, nodes []*v1.Node, nodepodCount nodePodEvictedCount) {
if !strategy.Enabled {
return
}
deleteDuplicatePods(ds.Client, policyGroupVersion, nodes, ds.DryRun)
deleteDuplicatePods(ds.Client, policyGroupVersion, nodes, ds.DryRun, nodepodCount, ds.MaxNoOfPodsToEvictPerNode)
}

// deleteDuplicatePods evicts the pod from node and returns the count of evicted pods.
func deleteDuplicatePods(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool) int {
func deleteDuplicatePods(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool, nodepodCount nodePodEvictedCount, maxPodsToEvict int) int {
podsEvicted := 0
for _, node := range nodes {
glog.V(1).Infof("Processing node: %#v", node.Name)
@@ -53,16 +54,20 @@ func deleteDuplicatePods(client clientset.Interface, policyGroupVersion string,
glog.V(1).Infof("%#v", creator)
// i = 0 does not evict the first pod
for i := 1; i < len(pods); i++ {
if nodepodCount[node]+1 > maxPodsToEvict {
break
}
success, err := evictions.EvictPod(client, pods[i], policyGroupVersion, dryRun)
if !success {
glog.Infof("Error when evicting pod: %#v (%#v)", pods[i].Name, err)
} else {
podsEvicted++
nodepodCount[node]++
glog.V(1).Infof("Evicted pod: %#v (%#v)", pods[i].Name, err)
}
}
}
}
podsEvicted += nodepodCount[node]
}
return podsEvicted
}
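The guard at the top of the eviction loop stops a strategy as soon as the node's counter would exceed the limit, and because Run passes the same map to every strategy, the cap applies to the combined evictions per node in one descheduler run. Note the boundary case as well: with a limit of 0 the guard trips immediately, which matches the new node-affinity test case where maxPodsToEvict of 0 yields no evictions. A small standalone demonstration of the counting semantics (illustrative only, not code from the commit):

package main

import "fmt"

// evictUpTo mimics one strategy's loop: it tries to evict `candidates` pods
// from `node`, honoring the shared per-node counter and the configured cap.
func evictUpTo(candidates int, count map[string]int, node string, maxPodsToEvict int) int {
	evicted := 0
	for i := 0; i < candidates; i++ {
		if count[node]+1 > maxPodsToEvict { // same guard as in the diff above
			break
		}
		count[node]++ // a successful eviction bumps the shared counter
		evicted++
	}
	return evicted
}

func main() {
	count := map[string]int{"node-1": 0}
	// The first strategy finds 3 candidates, a second strategy finds 2 more;
	// the per-node cap is 4.
	fmt.Println(evictUpTo(3, count, "node-1", 4)) // 3
	fmt.Println(evictUpTo(2, count, "node-1", 4)) // 1, only one slot is left
	fmt.Println(count["node-1"])                  // 4
}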
10 changes: 8 additions & 2 deletions pkg/descheduler/strategies/duplicates_test.go
@@ -37,11 +37,15 @@ func TestFindDuplicatePods(t *testing.T) {
p5 := test.BuildTestPod("p5", 100, 0, node.Name)
p6 := test.BuildTestPod("p6", 100, 0, node.Name)
p7 := test.BuildTestPod("p7", 100, 0, node.Name)
p8 := test.BuildTestPod("p8", 100, 0, node.Name)
p9 := test.BuildTestPod("p9", 100, 0, node.Name)

// All the following pods except for one will be evicted.
p1.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
p2.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
p3.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
p8.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
p9.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()

// The following 4 pods won't get evicted.
// A daemonset.
@@ -66,12 +70,14 @@
expectedEvictedPodCount := 2
fakeClient := &fake.Clientset{}
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
return true, &v1.PodList{Items: []v1.Pod{*p1, *p2, *p3, *p4, *p5, *p6, *p7}}, nil
return true, &v1.PodList{Items: []v1.Pod{*p1, *p2, *p3, *p4, *p5, *p6, *p7, *p8, *p9}}, nil
})
fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
return true, node, nil
})
podsEvicted := deleteDuplicatePods(fakeClient, "v1", []*v1.Node{node}, false)
npe := nodePodEvictedCount{}
npe[node] = 0
podsEvicted := deleteDuplicatePods(fakeClient, "v1", []*v1.Node{node}, false, npe, 2)
if podsEvicted != expectedEvictedPodCount {
t.Errorf("Unexpected no of pods evicted")
}
25 changes: 14 additions & 11 deletions pkg/descheduler/strategies/lownodeutilization.go
@@ -43,7 +43,7 @@ type NodeUsageMap struct {
}
type NodePodsMap map[*v1.Node][]*v1.Pod

func LowNodeUtilization(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node) {
func LowNodeUtilization(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node, nodepodCount nodePodEvictedCount) {
if !strategy.Enabled {
return
}
@@ -90,7 +90,7 @@ func LowNodeUtilization(ds *options.DeschedulerServer, strategy api.DeschedulerS
targetThresholds[v1.ResourceCPU], targetThresholds[v1.ResourceMemory], targetThresholds[v1.ResourcePods])
glog.V(1).Infof("Total number of nodes above target utilization: %v", len(targetNodes))

totalPodsEvicted := evictPodsFromTargetNodes(ds.Client, evictionPolicyGroupVersion, targetNodes, lowNodes, targetThresholds, ds.DryRun)
totalPodsEvicted := evictPodsFromTargetNodes(ds.Client, evictionPolicyGroupVersion, targetNodes, lowNodes, targetThresholds, ds.DryRun, ds.MaxNoOfPodsToEvictPerNode, nodepodCount)
glog.V(1).Infof("Total number of pods evicted: %v", totalPodsEvicted)

}
@@ -151,7 +151,7 @@ func classifyNodes(npm NodePodsMap, thresholds api.ResourceThresholds, targetThr
return lowNodes, targetNodes
}

func evictPodsFromTargetNodes(client clientset.Interface, evictionPolicyGroupVersion string, targetNodes, lowNodes []NodeUsageMap, targetThresholds api.ResourceThresholds, dryRun bool) int {
func evictPodsFromTargetNodes(client clientset.Interface, evictionPolicyGroupVersion string, targetNodes, lowNodes []NodeUsageMap, targetThresholds api.ResourceThresholds, dryRun bool, maxPodsToEvict int, nodepodCount nodePodEvictedCount) int {
podsEvicted := 0

SortNodesByUsage(targetNodes)
@@ -189,17 +189,17 @@ func evictPodsFromTargetNodes(client clientset.Interface, evictionPolicyGroupVer
nodeCapacity = node.node.Status.Allocatable
}
glog.V(3).Infof("evicting pods from node %#v with usage: %#v", node.node.Name, node.usage)
currentPodsEvicted := podsEvicted
currentPodsEvicted := nodepodCount[node.node]

// evict best effort pods
evictPods(node.bePods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCpu, &totalMem, &podsEvicted, dryRun)
evictPods(node.bePods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCpu, &totalMem, &currentPodsEvicted, dryRun, maxPodsToEvict)
// evict burstable pods
evictPods(node.bPods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCpu, &totalMem, &podsEvicted, dryRun)
evictPods(node.bPods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCpu, &totalMem, &currentPodsEvicted, dryRun, maxPodsToEvict)
// evict guaranteed pods
evictPods(node.gPods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCpu, &totalMem, &podsEvicted, dryRun)

podsEvictedFromNode := podsEvicted - currentPodsEvicted
glog.V(1).Infof("%v pods evicted from node %#v with usage %v", podsEvictedFromNode, node.node.Name, node.usage)
evictPods(node.gPods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCpu, &totalMem, &currentPodsEvicted, dryRun, maxPodsToEvict)
nodepodCount[node.node] = currentPodsEvicted
podsEvicted = podsEvicted + nodepodCount[node.node]
glog.V(1).Infof("%v pods evicted from node %#v with usage %v", nodepodCount[node.node], node.node.Name, node.usage)
}
return podsEvicted
}
@@ -214,10 +214,13 @@ func evictPods(inputPods []*v1.Pod,
totalCpu *float64,
totalMem *float64,
podsEvicted *int,
dryRun bool) {
dryRun bool, maxPodsToEvict int) {
if IsNodeAboveTargetUtilization(nodeUsage, targetThresholds) && (*totalPods > 0 || *totalCpu > 0 || *totalMem > 0) {
onePodPercentage := api.Percentage((float64(1) * 100) / float64(nodeCapacity.Pods().Value()))
for _, pod := range inputPods {
if *podsEvicted+1 > maxPodsToEvict {
break
}
cUsage := helper.GetResourceRequest(pod, v1.ResourceCPU)
mUsage := helper.GetResourceRequest(pod, v1.ResourceMemory)
success, err := evictions.EvictPod(client, pod, evictionPolicyGroupVersion, dryRun)
10 changes: 7 additions & 3 deletions pkg/descheduler/strategies/lownodeutilization_test.go
@@ -109,15 +109,19 @@ func TestLowNodeUtilization(t *testing.T) {
}
return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
})
expectedPodsEvicted := 4
expectedPodsEvicted := 3
npm := CreateNodePodsMap(fakeClient, []*v1.Node{n1, n2, n3})
lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds)
if len(lowNodes) != 1 {
t.Errorf("After ignoring unschedulable nodes, expected only one node to be under utilized.")
}
podsEvicted := evictPodsFromTargetNodes(fakeClient, "v1", targetNodes, lowNodes, targetThresholds, false)
npe := nodePodEvictedCount{}
npe[n1] = 0
npe[n2] = 0
npe[n3] = 0
podsEvicted := evictPodsFromTargetNodes(fakeClient, "v1", targetNodes, lowNodes, targetThresholds, false, 3, npe)
if expectedPodsEvicted != podsEvicted {
t.Errorf("Expected %#v pods to be evicted but %#v got evicted", expectedPodsEvicted)
t.Errorf("Expected %#v pods to be evicted but %#v got evicted", expectedPodsEvicted, podsEvicted)
}

}
12 changes: 8 additions & 4 deletions pkg/descheduler/strategies/node_affinity.go
@@ -26,12 +26,12 @@ import (
"k8s.io/api/core/v1"
)

func RemovePodsViolatingNodeAffinity(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node) {
evictionCount := removePodsViolatingNodeAffinityCount(ds, strategy, evictionPolicyGroupVersion, nodes)
func RemovePodsViolatingNodeAffinity(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node, nodePodCount nodePodEvictedCount) {
evictionCount := removePodsViolatingNodeAffinityCount(ds, strategy, evictionPolicyGroupVersion, nodes, nodePodCount, ds.MaxNoOfPodsToEvictPerNode)
glog.V(1).Infof("Evicted %v pods", evictionCount)
}

func removePodsViolatingNodeAffinityCount(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node) int {
func removePodsViolatingNodeAffinityCount(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node, nodepodCount nodePodEvictedCount, maxPodsToEvict int) int {
evictedPodCount := 0
if !strategy.Enabled {
return evictedPodCount
@@ -51,15 +51,19 @@ func removePodsViolatingNodeAffinityCount(ds *options.DeschedulerServer, strateg
}

for _, pod := range pods {
if nodepodCount[node]+1 > maxPodsToEvict {
break
}
if pod.Spec.Affinity != nil && pod.Spec.Affinity.NodeAffinity != nil && pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {

if !nodeutil.PodFitsCurrentNode(pod, node) && nodeutil.PodFitsAnyNode(pod, nodes) {
glog.V(1).Infof("Evicting pod: %v", pod.Name)
evictions.EvictPod(ds.Client, pod, evictionPolicyGroupVersion, false)
evictedPodCount++
nodepodCount[node]++
}
}
}
evictedPodCount += nodepodCount[node]
}
default:
glog.Errorf("invalid nodeAffinityType: %v", nodeAffinity)
35 changes: 28 additions & 7 deletions pkg/descheduler/strategies/node_affinity_test.go
@@ -92,6 +92,8 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
pods []v1.Pod
strategy api.DeschedulerStrategy
expectedEvictedPodCount int
npe nodePodEvictedCount
maxPodsToEvict int
}{
{
description: "Strategy disabled, should not evict any pods",
@@ -104,8 +106,10 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
},
},
expectedEvictedPodCount: 0,
pods: addPodsToNode(nodeWithoutLabels),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
pods: addPodsToNode(nodeWithoutLabels),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
npe: nodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0},
maxPodsToEvict: 0,
},
{
description: "Invalid strategy type, should not evict any pods",
@@ -118,29 +122,46 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
},
},
expectedEvictedPodCount: 0,
pods: addPodsToNode(nodeWithoutLabels),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
pods: addPodsToNode(nodeWithoutLabels),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
npe: nodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0},
maxPodsToEvict: 0,
},
{
description: "Pod is correctly scheduled on node, no eviction expected",
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
expectedEvictedPodCount: 0,
pods: addPodsToNode(nodeWithLabels),
nodes: []*v1.Node{nodeWithLabels},
pods: addPodsToNode(nodeWithLabels),
nodes: []*v1.Node{nodeWithLabels},
npe: nodePodEvictedCount{nodeWithLabels: 0},
maxPodsToEvict: 0,
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, should be evicted",
expectedEvictedPodCount: 1,
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
pods: addPodsToNode(nodeWithoutLabels),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
npe: nodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0},
maxPodsToEvict: 1,
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvict set to 0, should not be evicted",
expectedEvictedPodCount: 0,
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
pods: addPodsToNode(nodeWithoutLabels),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
npe: nodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0},
maxPodsToEvict: 0,
},
{
description: "Pod is scheduled on node without matching labels, but no node where pod fits is available, should not evict",
expectedEvictedPodCount: 0,
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
pods: addPodsToNode(nodeWithoutLabels),
nodes: []*v1.Node{nodeWithoutLabels, unschedulableNodeWithLabels},
npe: nodePodEvictedCount{nodeWithoutLabels: 0, unschedulableNodeWithLabels: 0},
maxPodsToEvict: 0,
},
}

@@ -155,7 +176,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
Client: fakeClient,
}

actualEvictedPodCount := removePodsViolatingNodeAffinityCount(&ds, tc.strategy, "v1", tc.nodes)
actualEvictedPodCount := removePodsViolatingNodeAffinityCount(&ds, tc.strategy, "v1", tc.nodes, tc.npe, tc.maxPodsToEvict)
if actualEvictedPodCount != tc.expectedEvictedPodCount {
t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
}
12 changes: 8 additions & 4 deletions pkg/descheduler/strategies/pod_antiaffinity.go
@@ -31,15 +31,15 @@
)

// RemovePodsViolatingInterPodAntiAffinity with elimination strategy
func RemovePodsViolatingInterPodAntiAffinity(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, policyGroupVersion string, nodes []*v1.Node) {
func RemovePodsViolatingInterPodAntiAffinity(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, policyGroupVersion string, nodes []*v1.Node, nodePodCount nodePodEvictedCount) {
if !strategy.Enabled {
return
}
removePodsWithAffinityRules(ds.Client, policyGroupVersion, nodes, ds.DryRun)
removePodsWithAffinityRules(ds.Client, policyGroupVersion, nodes, ds.DryRun, nodePodCount, ds.MaxNoOfPodsToEvictPerNode)
}

// removePodsWithAffinityRules evicts pods on the node which are having a pod affinity rules.
func removePodsWithAffinityRules(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool) int {
func removePodsWithAffinityRules(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool, nodePodCount nodePodEvictedCount, maxPodsToEvict int) int {
podsEvicted := 0
for _, node := range nodes {
glog.V(1).Infof("Processing node: %#v\n", node.Name)
@@ -49,12 +49,15 @@ func removePodsWithAffinityRules(client clientset.Interface, policyGroupVersion
}
totalPods := len(pods)
for i := 0; i < totalPods; i++ {
if nodePodCount[node]+1 > maxPodsToEvict {
break
}
if checkPodsWithAntiAffinityExist(pods[i], pods) {
success, err := evictions.EvictPod(client, pods[i], policyGroupVersion, dryRun)
if !success {
glog.Infof("Error when evicting pod: %#v (%#v)\n", pods[i].Name, err)
} else {
podsEvicted++
nodePodCount[node]++
glog.V(1).Infof("Evicted pod: %#v (%#v)\n because of existing anti-affinity", pods[i].Name, err)
// Since the current pod is evicted all other pods which have anti-affinity with this
// pod need not be evicted.
@@ -65,6 +68,7 @@ func removePodsWithAffinityRules(client clientset.Interface, policyGroupVersion
}
}
}
podsEvicted += nodePodCount[node]
}
return podsEvicted
}