
Commit ad1859f
Merge branch 'replace-v1-with-corev1' into 'master'

Use corev1 import for k8s.io/api/core/v1 consistently

See merge request nvidia/kubernetes/gpu-operator!971
tariq1890 committed Dec 1, 2023
2 parents f7dae1b + de2a96a commit ad1859f
Showing 3 changed files with 33 additions and 34 deletions.
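
For context: before this change, these files imported k8s.io/api/core/v1 under two aliases at once, corev1 and a bare v1, so call sites mixed v1.Pod with corev1.Pod for the same types, and the bare v1 alias was easy to confuse with apps/v1, node/v1, or meta/v1. The diff below drops the duplicate alias and qualifies every core/v1 reference with corev1. A minimal sketch of the resulting convention (newPullSecretRef is a hypothetical helper for illustration, not part of this commit):

    package main

    import (
        "fmt"

        // One alias per API group; the duplicate `v1 "k8s.io/api/core/v1"`
        // import alongside this one is what the commit removes.
        corev1 "k8s.io/api/core/v1"
    )

    // newPullSecretRef returns an image-pull-secret reference, qualified
    // with the corev1 alias as the rest of the codebase now does.
    func newPullSecretRef(name string) corev1.LocalObjectReference {
        return corev1.LocalObjectReference{Name: name}
    }

    func main() {
        fmt.Println(newPullSecretRef("my-registry-secret").Name)
    }

The change is purely mechanical: every core/v1 `v1.X` reference becomes `corev1.X`, with no behavioral difference.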
29 changes: 14 additions & 15 deletions controllers/object_controls.go
@@ -24,7 +24,6 @@ import (
    "golang.org/x/mod/semver"
    appsv1 "k8s.io/api/apps/v1"
    corev1 "k8s.io/api/core/v1"
-   v1 "k8s.io/api/core/v1"
    nodev1 "k8s.io/api/node/v1"
    nodev1beta1 "k8s.io/api/node/v1beta1"
    policyv1beta1 "k8s.io/api/policy/v1beta1"
@@ -954,14 +953,14 @@ func applyOCPProxySpec(n ClusterPolicyController, podSpec *corev1.PodSpec) error
            MountPath: TrustedCABundleMountDir,
        })
    podSpec.Volumes = append(podSpec.Volumes,
-       v1.Volume{
+       corev1.Volume{
            Name: TrustedCAConfigMapName,
            VolumeSource: corev1.VolumeSource{
                ConfigMap: &corev1.ConfigMapVolumeSource{
-                   LocalObjectReference: v1.LocalObjectReference{
+                   LocalObjectReference: corev1.LocalObjectReference{
                        Name: TrustedCAConfigMapName,
                    },
-                   Items: []v1.KeyToPath{
+                   Items: []corev1.KeyToPath{
                        {
                            Key:  TrustedCABundleFileName,
                            Path: TrustedCACertificate,
@@ -1019,8 +1018,8 @@ func getOrCreateTrustedCAConfigMap(n ClusterPolicyController, name string) (*cor
}

// get proxy env variables from cluster wide proxy in OCP
-func getProxyEnv(proxyConfig *apiconfigv1.Proxy) []v1.EnvVar {
-   envVars := []v1.EnvVar{}
+func getProxyEnv(proxyConfig *apiconfigv1.Proxy) []corev1.EnvVar {
+   envVars := []corev1.EnvVar{}
    if proxyConfig == nil {
        return envVars
    }
@@ -1041,11 +1040,11 @@ func getProxyEnv(proxyConfig *apiconfigv1.Proxy) []v1.EnvVar {
        if len(v) == 0 {
            continue
        }
-       upperCaseEnvvar := v1.EnvVar{
+       upperCaseEnvvar := corev1.EnvVar{
            Name:  strings.ToUpper(e),
            Value: v,
        }
-       lowerCaseEnvvar := v1.EnvVar{
+       lowerCaseEnvvar := corev1.EnvVar{
            Name:  strings.ToLower(e),
            Value: v,
        }
@@ -1405,7 +1404,7 @@ func TransformDCGMExporter(obj *appsv1.DaemonSet, config *gpuv1.ClusterPolicySpe
        return err
    }

-   initContainer := v1.Container{}
+   initContainer := corev1.Container{}
    if initImage != "" {
        initContainer.Image = initImage
    }
@@ -2548,7 +2547,7 @@ func transformPrecompiledDriverDaemonset(obj *appsv1.DaemonSet, config *gpuv1.Cl
func transformOpenShiftDriverToolkitContainer(obj *appsv1.DaemonSet, config *gpuv1.ClusterPolicySpec, n ClusterPolicyController, mainContainerName string) error {
    var err error

-   getContainer := func(name string, remove bool) (*v1.Container, error) {
+   getContainer := func(name string, remove bool) (*corev1.Container, error) {
        for i, container := range obj.Spec.Template.Spec.Containers {
            if container.Name != name {
                continue
@@ -2581,7 +2580,7 @@ func transformOpenShiftDriverToolkitContainer(obj *appsv1.DaemonSet, config *gpu
    }

    /* find the main container and driver-toolkit sidecar container */
-   var mainContainer, driverToolkitContainer *v1.Container
+   var mainContainer, driverToolkitContainer *corev1.Container
    if mainContainer, err = getContainer(mainContainerName, false); err != nil {
        return err
    }
@@ -3199,15 +3198,15 @@ func transformValidationInitContainer(obj *appsv1.DaemonSet, config *gpuv1.Clust
    return nil
}

-func addPullSecrets(podSpec *v1.PodSpec, secrets []string) {
+func addPullSecrets(podSpec *corev1.PodSpec, secrets []string) {
    for _, secret := range secrets {
        if !containsSecret(podSpec.ImagePullSecrets, secret) {
-           podSpec.ImagePullSecrets = append(podSpec.ImagePullSecrets, v1.LocalObjectReference{Name: secret})
+           podSpec.ImagePullSecrets = append(podSpec.ImagePullSecrets, corev1.LocalObjectReference{Name: secret})
        }
    }
}

-func containsSecret(secrets []v1.LocalObjectReference, secretName string) bool {
+func containsSecret(secrets []corev1.LocalObjectReference, secretName string) bool {
    for _, s := range secrets {
        if s.Name == secretName {
            return true
@@ -3470,7 +3469,7 @@ func ocpHasDriverToolkitImageStream(n *ClusterPolicyController) (bool, error) {
// With OpenShift DriverToolkit, we need to ensure that this secret is
// populated, otherwise, the Pod won't have the credentials to access
// the DriverToolkit image in the cluster registry.
-func serviceAccountHasDockerCfg(obj *v1.ServiceAccount, n ClusterPolicyController) (bool, error) {
+func serviceAccountHasDockerCfg(obj *corev1.ServiceAccount, n ClusterPolicyController) (bool, error) {
    ctx := n.ctx
    logger := n.rec.Log.WithValues("ServiceAccount", obj.Name)

6 changes: 3 additions & 3 deletions internal/validator/validator.go
@@ -20,7 +20,7 @@ import (
    "context"
    "fmt"

-   v1 "k8s.io/api/core/v1"
+   corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/labels"
    "sigs.k8s.io/controller-runtime/pkg/client"

@@ -72,8 +72,8 @@ func (nsv *nodeSelectorValidator) Validate(ctx context.Context, cr *nvidiav1alph
}

// getNVIDIADriverSelectedNodes returns selected nodes based on the nodeselector labels set for a given NVIDIADriver instance
-func (nsv *nodeSelectorValidator) getNVIDIADriverSelectedNodes(ctx context.Context, cr *nvidiav1alpha1.NVIDIADriver) (*v1.NodeList, error) {
-   nodeList := &v1.NodeList{}
+func (nsv *nodeSelectorValidator) getNVIDIADriverSelectedNodes(ctx context.Context, cr *nvidiav1alpha1.NVIDIADriver) (*corev1.NodeList, error) {
+   nodeList := &corev1.NodeList{}

    if cr.Spec.NodeSelector == nil {
        cr.Spec.NodeSelector = cr.GetNodeSelector()
32 changes: 16 additions & 16 deletions validator/main.go
@@ -34,7 +34,7 @@ import (
    log "github.com/sirupsen/logrus"
    cli "github.com/urfave/cli/v2"

-   v1 "k8s.io/api/core/v1"
+   corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/fields"
@@ -953,14 +953,14 @@ func (p *Plugin) runWorkload() error {

    imagePullPolicy := os.Getenv(validatorImagePullPolicyEnvName)
    if imagePullPolicy != "" {
-       pod.Spec.Containers[0].ImagePullPolicy = v1.PullPolicy(imagePullPolicy)
-       pod.Spec.InitContainers[0].ImagePullPolicy = v1.PullPolicy(imagePullPolicy)
+       pod.Spec.Containers[0].ImagePullPolicy = corev1.PullPolicy(imagePullPolicy)
+       pod.Spec.InitContainers[0].ImagePullPolicy = corev1.PullPolicy(imagePullPolicy)
    }

    if os.Getenv(validatorImagePullSecretsEnvName) != "" {
        pullSecrets := strings.Split(os.Getenv(validatorImagePullSecretsEnvName), ",")
        for _, secret := range pullSecrets {
-           pod.Spec.ImagePullSecrets = append(pod.Spec.ImagePullSecrets, v1.LocalObjectReference{Name: secret})
+           pod.Spec.ImagePullSecrets = append(pod.Spec.ImagePullSecrets, corev1.LocalObjectReference{Name: secret})
        }
    }
    if os.Getenv(validatorRuntimeClassEnvName) != "" {
@@ -988,7 +988,7 @@ func (p *Plugin) runWorkload() error {
        return err
    }

-   gpuResource := v1.ResourceList{
+   gpuResource := corev1.ResourceList{
        resourceName: resource.MustParse("1"),
    }

@@ -1027,7 +1027,7 @@ func (p *Plugin) runWorkload() error {
    return nil
}

-func setOwnerReference(ctx context.Context, kubeClient kubernetes.Interface, pod *v1.Pod) error {
+func setOwnerReference(ctx context.Context, kubeClient kubernetes.Interface, pod *corev1.Pod) error {
    // get owner of validator daemonset (which is ClusterPolicy)
    validatorDaemonset, err := kubeClient.AppsV1().DaemonSets(namespaceFlag).Get(ctx, "nvidia-operator-validator", meta_v1.GetOptions{})
    if err != nil {
@@ -1039,7 +1039,7 @@ func setOwnerReference(ctx context.Context, kubeClient kubernetes.Interface, pod
    return nil
}

-func setTolerations(ctx context.Context, kubeClient kubernetes.Interface, pod *v1.Pod) error {
+func setTolerations(ctx context.Context, kubeClient kubernetes.Interface, pod *corev1.Pod) error {
    // get tolerations of validator daemonset
    validatorDaemonset, err := kubeClient.AppsV1().DaemonSets(namespaceFlag).Get(ctx, "nvidia-operator-validator", meta_v1.GetOptions{})
    if err != nil {
@@ -1071,8 +1071,8 @@ func waitForPod(ctx context.Context, kubeClient kubernetes.Interface, name strin
    return fmt.Errorf("gave up waiting for pod %s to be available", name)
}

-func loadPodSpec(podSpecPath string) (*v1.Pod, error) {
-   var pod v1.Pod
+func loadPodSpec(podSpecPath string) (*corev1.Pod, error) {
+   var pod corev1.Pod
    manifest, err := os.ReadFile(podSpecPath)
    if err != nil {
        panic(err)
@@ -1134,7 +1134,7 @@ func (p *Plugin) validateGPUResource() error {
    return fmt.Errorf("GPU resources are not discovered by the node")
}

-func (p *Plugin) availableMIGResourceName(resources v1.ResourceList) v1.ResourceName {
+func (p *Plugin) availableMIGResourceName(resources corev1.ResourceList) corev1.ResourceName {
    for resourceName, quantity := range resources {
        if strings.HasPrefix(string(resourceName), migGPUResourcePrefix) && quantity.Value() >= 1 {
            log.Debugf("Found MIG GPU resource name %s quantity %d", resourceName, quantity.Value())
@@ -1144,7 +1144,7 @@ func (p *Plugin) availableMIGResourceName(resources v1.ResourceList) v1.Resource
    return ""
}

-func (p *Plugin) availableGenericResourceName(resources v1.ResourceList) v1.ResourceName {
+func (p *Plugin) availableGenericResourceName(resources corev1.ResourceList) corev1.ResourceName {
    for resourceName, quantity := range resources {
        if strings.HasPrefix(string(resourceName), genericGPUResourceType) && quantity.Value() >= 1 {
            log.Debugf("Found GPU resource name %s quantity %d", resourceName, quantity.Value())
@@ -1154,7 +1154,7 @@ func (p *Plugin) availableGenericResourceName(resources v1.ResourceList) v1.Reso
    return ""
}

-func (p *Plugin) getGPUResourceName() (v1.ResourceName, error) {
+func (p *Plugin) getGPUResourceName() (corev1.ResourceName, error) {
    // get node info to check allocatable GPU resources
    node, err := getNode(p.ctx, p.kubeClient)
    if err != nil {
@@ -1177,7 +1177,7 @@ func (p *Plugin) setKubeClient(kubeClient kubernetes.Interface) {
    p.kubeClient = kubeClient
}

-func getNode(ctx context.Context, kubeClient kubernetes.Interface) (*v1.Node, error) {
+func getNode(ctx context.Context, kubeClient kubernetes.Interface) (*corev1.Node, error) {
    node, err := kubeClient.CoreV1().Nodes().Get(ctx, nodeNameFlag, meta_v1.GetOptions{})
    if err != nil {
        log.Errorf("unable to get node with name %s, err %s", nodeNameFlag, err.Error())
@@ -1242,14 +1242,14 @@ func (c *CUDA) runWorkload() error {

    imagePullPolicy := os.Getenv(validatorImagePullPolicyEnvName)
    if imagePullPolicy != "" {
-       pod.Spec.Containers[0].ImagePullPolicy = v1.PullPolicy(imagePullPolicy)
-       pod.Spec.InitContainers[0].ImagePullPolicy = v1.PullPolicy(imagePullPolicy)
+       pod.Spec.Containers[0].ImagePullPolicy = corev1.PullPolicy(imagePullPolicy)
+       pod.Spec.InitContainers[0].ImagePullPolicy = corev1.PullPolicy(imagePullPolicy)
    }

    if os.Getenv(validatorImagePullSecretsEnvName) != "" {
        pullSecrets := strings.Split(os.Getenv(validatorImagePullSecretsEnvName), ",")
        for _, secret := range pullSecrets {
-           pod.Spec.ImagePullSecrets = append(pod.Spec.ImagePullSecrets, v1.LocalObjectReference{Name: secret})
+           pod.Spec.ImagePullSecrets = append(pod.Spec.ImagePullSecrets, corev1.LocalObjectReference{Name: secret})
        }
    }
    if os.Getenv(validatorRuntimeClassEnvName) != "" {
