Commit 9fbccb4
Respond to review comments.
David Oppenheimer committed Jul 5, 2015
1 parent 4ea8b8a commit 9fbccb4
Showing 2 changed files with 7 additions and 7 deletions.
12 changes: 6 additions & 6 deletions plugin/pkg/scheduler/algorithm/priorities/priorities.go
@@ -34,7 +34,7 @@ func calculateScore(requested int64, capacity int64, node string) int {
 		return 0
 	}
 	if requested > capacity {
-		glog.Infof("Combined requested resources %d from existing pods exceeds capacity %d on minion: %s",
+		glog.Infof("Combined requested resources %d from existing pods exceeds capacity %d on node %s",
 			requested, capacity, node)
 		return 0
 	}
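The change above only rewords the log message ("minion" becomes "node"). For context, here is a minimal sketch of the whole scoring helper; the zero-capacity guard and the final formula are assumptions, since the diff does not show them:

// Sketch, not part of this commit: calculateScore as it plausibly reads in full.
func calculateScore(requested int64, capacity int64, node string) int {
	if capacity == 0 { // assumed guard: avoid dividing by zero below
		return 0
	}
	if requested > capacity {
		glog.Infof("Combined requested resources %d from existing pods exceeds capacity %d on node %s",
			requested, capacity, node)
		return 0
	}
	// Assumed: score rises linearly with the fraction of capacity left free, on a 0-10 scale.
	return int(((capacity - requested) * 10) / capacity)
}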
@@ -52,7 +52,7 @@ const defaultMemoryLimit int64 = 60 * 1024 * 1024 // 60 MB
 
 // TODO: Consider setting default as a fixed fraction of machine capacity (take "capacity api.ResourceList"
 // as an additional argument here) rather than using constants
-func toNonzeroLimits(limits *api.ResourceList) (int64, int64) {
+func getNonzeroLimits(limits *api.ResourceList) (int64, int64) {
 	var out_millicpu, out_memory int64
 	// Override if un-set, but not if explicitly set to zero
 	if (*limits.Cpu() == resource.Quantity{}) {
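The diff view truncates the renamed helper. A sketch of how the complete getNonzeroLimits plausibly continues, assuming a defaultMilliCpuLimit constant analogous to the defaultMemoryLimit shown above:

// Sketch, not part of this commit. defaultMilliCpuLimit is an assumed constant
// mirroring defaultMemoryLimit; the memory branch mirrors the CPU branch shown in the diff.
func getNonzeroLimits(limits *api.ResourceList) (int64, int64) {
	var out_millicpu, out_memory int64
	// Override if un-set, but not if explicitly set to zero
	if (*limits.Cpu() == resource.Quantity{}) {
		out_millicpu = defaultMilliCpuLimit // assumed default
	} else {
		out_millicpu = limits.Cpu().MilliValue()
	}
	// Override if un-set, but not if explicitly set to zero
	if (*limits.Memory() == resource.Quantity{}) {
		out_memory = defaultMemoryLimit // 60 MB, from the constant above
	} else {
		out_memory = limits.Memory().Value()
	}
	return out_millicpu, out_memory
}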
@@ -79,15 +79,15 @@ func calculateResourceOccupancy(pod *api.Pod, node api.Node, pods []*api.Pod) al
 
 	for _, existingPod := range pods {
 		for _, container := range existingPod.Spec.Containers {
-			cpu, memory := toNonzeroLimits(&container.Resources.Limits)
+			cpu, memory := getNonzeroLimits(&container.Resources.Limits)
 			totalMilliCPU += cpu
 			totalMemory += memory
 		}
 	}
 	// Add the resources requested by the current pod being scheduled.
 	// This also helps differentiate between differently sized, but empty, minions.
 	for _, container := range pod.Spec.Containers {
-		cpu, memory := toNonzeroLimits(&container.Resources.Limits)
+		cpu, memory := getNonzeroLimits(&container.Resources.Limits)
 		totalMilliCPU += cpu
 		totalMemory += memory
 	}
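The tail of calculateResourceOccupancy is cut off by the diff view. Presumably the accumulated totals are scored against the node's capacity via calculateScore; a sketch under that assumption, where the helper name and the node.Status.Capacity field path are both assumptions:

// Sketch, not part of this commit: turning the summed limits into the two
// per-resource scores. occupancyScores is a hypothetical helper name.
func occupancyScores(node api.Node, totalMilliCPU, totalMemory int64) (int, int) {
	capacityMilliCPU := node.Status.Capacity.Cpu().MilliValue() // field path assumed
	capacityMemory := node.Status.Capacity.Memory().Value()
	cpuScore := calculateScore(totalMilliCPU, capacityMilliCPU, node.Name)
	memoryScore := calculateScore(totalMemory, capacityMemory, node.Name)
	return cpuScore, memoryScore
}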
@@ -195,15 +195,15 @@ func calculateBalancedResourceAllocation(pod *api.Pod, node api.Node, pods []*ap
 	score := int(0)
 	for _, existingPod := range pods {
 		for _, container := range existingPod.Spec.Containers {
-			cpu, memory := toNonzeroLimits(&container.Resources.Limits)
+			cpu, memory := getNonzeroLimits(&container.Resources.Limits)
 			totalMilliCPU += cpu
 			totalMemory += memory
 		}
 	}
 	// Add the resources requested by the current pod being scheduled.
 	// This also helps differentiate between differently sized, but empty, minions.
 	for _, container := range pod.Spec.Containers {
-		cpu, memory := toNonzeroLimits(&container.Resources.Limits)
+		cpu, memory := getNonzeroLimits(&container.Resources.Limits)
 		totalMilliCPU += cpu
 		totalMemory += memory
 	}
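calculateBalancedResourceAllocation accumulates the same totals but scores them differently: rather than rewarding free capacity per resource, it favors nodes whose CPU and memory utilization fractions are close to each other. A sketch of that scoring idea, where the exact formula is an assumption:

// Sketch, not part of this commit: balanced-allocation scoring on a 0-10 scale.
// Requires `import "math"`.
func balancedScore(totalMilliCPU, capacityMilliCPU, totalMemory, capacityMemory int64) int {
	cpuFraction := float64(totalMilliCPU) / float64(capacityMilliCPU)
	memoryFraction := float64(totalMemory) / float64(capacityMemory)
	if cpuFraction >= 1 || memoryFraction >= 1 {
		return 0 // over-committed on at least one axis: worst score
	}
	diff := math.Abs(cpuFraction - memoryFraction) // 0 when perfectly balanced
	return int(10 - diff*10)                       // balanced nodes approach 10
}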
2 changes: 1 addition & 1 deletion
@@ -84,7 +84,7 @@ func (s *ServiceSpread) CalculateSpreadPriority(pod *api.Pod, podLister algorith
 		}
 		result = append(result, algorithm.HostPriority{Host: minion.Name, Score: int(fScore)})
 		glog.V(10).Infof(
-			"%v -> %v: ServiceSpreadPriority, Sore: (%d)", pod.Name, minion.Name, int(fScore),
+			"%v -> %v: ServiceSpreadPriority, Score: (%d)", pod.Name, minion.Name, int(fScore),
 		)
 	}
 	return result, nil
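The corrected log line sits behind glog.V(10), conventionally glog's highest verbosity, so the "Sore" typo only ever surfaced when the scheduler ran with -v=10 or above. The gating pattern, for reference:

// glog.V(10) returns a Verbose value that is false unless the process was
// started with verbosity >= 10, so this per-pod, per-node line costs nothing
// in normal operation.
if glog.V(10) {
	glog.Infof("%v -> %v: ServiceSpreadPriority, Score: (%d)", pod.Name, minion.Name, int(fScore))
}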
