@@ -41,6 +41,7 @@ import (
 	resourceapi "k8s.io/api/resource/v1alpha3"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/klog/v2"
 	draplugin "k8s.io/kubernetes/pkg/kubelet/cm/dra/plugin"
@@ -74,7 +75,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
 			// When plugin and kubelet get killed at the end of the tests, they leave ResourceSlices behind.
 			// Perhaps garbage collection would eventually remove them (not sure how the node instance
 			// is managed), but this could take time. Let's clean up explicitly.
-			framework.ExpectNoError(f.ClientSet.ResourceV1alpha2().ResourceSlices().DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{}))
+			framework.ExpectNoError(f.ClientSet.ResourceV1alpha3().ResourceSlices().DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{}))
 		})
 	})
 
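
Aside (not part of the commit): the DeleteCollection above removes every ResourceSlice in the cluster, which is acceptable for a single-node e2e run. If a narrower cleanup were wanted, v1alpha3 also exposes a field-selector constant for the slice's node, analogous to the driver selector used further down in this diff. A minimal sketch, assuming the same imports as the test file; deleteSlicesForNode is a hypothetical helper, not part of this change:

// deleteSlicesForNode removes only the ResourceSlices published for one node.
// Sketch only; assumes the test file's imports.
func deleteSlicesForNode(ctx context.Context, clientSet kubernetes.Interface, nodeName string) error {
	return clientSet.ResourceV1alpha3().ResourceSlices().DeleteCollection(ctx,
		metav1.DeleteOptions{},
		metav1.ListOptions{FieldSelector: resourceapi.ResourceSliceSelectorNodeName + "=" + nodeName})
}
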
@@ -562,7 +563,7 @@ func newKubeletPlugin(ctx context.Context, clientSet kubernetes.Interface, nodeN
 	ginkgo.DeferCleanup(func(ctx context.Context) {
 		// kubelet should do this eventually, but better make sure.
 		// A separate test checks this explicitly.
-		framework.ExpectNoError(clientSet.ResourceV1alpha3().ResourceSlices().DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{FieldSelector: "driverName=" + driverName}))
+		framework.ExpectNoError(clientSet.ResourceV1alpha3().ResourceSlices().DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{FieldSelector: resourceapi.ResourceSliceSelectorDriver + "=" + driverName}))
 	})
 	ginkgo.DeferCleanup(plugin.Stop)
 
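
Aside (not part of the commit): the cleanup above is only a safety net; the check that the kubelet itself removes the driver's slices ("a separate test checks this explicitly") could look roughly like the sketch below. waitForSlicesGone is a hypothetical helper, assuming the test file's imports plus time and k8s.io/apimachinery/pkg/util/wait:

// waitForSlicesGone polls until no ResourceSlice for the given driver remains.
// Sketch only, not the actual test.
func waitForSlicesGone(ctx context.Context, clientSet kubernetes.Interface, driverName string) error {
	return wait.PollUntilContextTimeout(ctx, time.Second, 30*time.Second, true,
		func(ctx context.Context) (bool, error) {
			slices, err := clientSet.ResourceV1alpha3().ResourceSlices().List(ctx, metav1.ListOptions{
				FieldSelector: resourceapi.ResourceSliceSelectorDriver + "=" + driverName,
			})
			if err != nil {
				return false, err
			}
			return len(slices.Items) == 0, nil
		})
}
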
@@ -573,18 +574,17 @@ func newKubeletPlugin(ctx context.Context, clientSet kubernetes.Interface, nodeN
 // NOTE: as scheduler and controller manager are not running by the Node e2e,
 // the objects must contain all required data to be processed correctly by the API server
 // and placed on the node without involving the scheduler and the DRA controller
-func createTestObjects(ctx context.Context, clientSet kubernetes.Interface, nodename, namespace, className, claimName, podName string, deferPodDeletion bool, pluginNames []string) *v1.Pod {
-	// ResourceClass
-	class := &resourceapi.ResourceClass{
+func createTestObjects(ctx context.Context, clientSet kubernetes.Interface, nodename, namespace, className, claimName, podName string, deferPodDeletion bool, driverNames []string) *v1.Pod {
+	// DeviceClass
+	class := &resourceapi.DeviceClass{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: className,
 		},
-		DriverName: "controller",
 	}
-	_, err := clientSet.ResourceV1alpha3().ResourceClasses().Create(ctx, class, metav1.CreateOptions{})
+	_, err := clientSet.ResourceV1alpha3().DeviceClasses().Create(ctx, class, metav1.CreateOptions{})
 	framework.ExpectNoError(err)
 
-	ginkgo.DeferCleanup(clientSet.ResourceV1alpha3().ResourceClasses().Delete, className, metav1.DeleteOptions{})
+	ginkgo.DeferCleanup(clientSet.ResourceV1alpha3().DeviceClasses().Delete, className, metav1.DeleteOptions{})
 
 	// ResourceClaim
 	podClaimName := "resource-claim"
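
Aside (not part of the commit): unlike the removed ResourceClass, a v1alpha3 DeviceClass carries no driver name, so an empty class is enough for this test. If a class needed to restrict which devices it matches, that would be expressed as a CEL selector in its spec, roughly as sketched below; newSingleDriverClass, the class name, and the expression are illustrative only:

// newSingleDriverClass is a hypothetical helper showing a DeviceClass that
// matches only devices published by one driver. Sketch only; assumes the test
// file's imports plus fmt.
func newSingleDriverClass(name, driver string) *resourceapi.DeviceClass {
	return &resourceapi.DeviceClass{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: resourceapi.DeviceClassSpec{
			Selectors: []resourceapi.DeviceSelector{{
				CEL: &resourceapi.CELDeviceSelector{
					// The CEL expression is evaluated per advertised device;
					// "device.driver" is the name of the publishing driver.
					Expression: fmt.Sprintf("device.driver == %q", driver),
				},
			}},
		},
	}
}
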
@@ -593,7 +593,12 @@ func createTestObjects(ctx context.Context, clientSet kubernetes.Interface, node
 			Name: claimName,
 		},
 		Spec: resourceapi.ResourceClaimSpec{
-			ResourceClassName: className,
+			Devices: resourceapi.DeviceClaim{
+				Requests: []resourceapi.DeviceRequest{{
+					Name:            "my-request",
+					DeviceClassName: className,
+				}},
+			},
 		},
 	}
 	createdClaim, err := clientSet.ResourceV1alpha3().ResourceClaims(namespace).Create(ctx, claim, metav1.CreateOptions{})
@@ -637,21 +642,32 @@ func createTestObjects(ctx context.Context, clientSet kubernetes.Interface, node
 	}
 
 	// Update claim status: set ReservedFor and AllocationResult
-	// NOTE: This is usually done by the DRA controller
-	resourceHandlers := make([]resourceapi.ResourceHandle, len(pluginNames))
-	for i, pluginName := range pluginNames {
-		resourceHandlers[i] = resourceapi.ResourceHandle{
-			DriverName: pluginName,
-			Data:       "{\"EnvVars\":{\"DRA_PARAM1\":\"PARAM1_VALUE\"},\"NodeName\":\"\"}",
+	// NOTE: This is usually done by the DRA controller or the scheduler.
+	results := make([]resourceapi.DeviceRequestAllocationResult, len(driverNames))
+	for i, driverName := range driverNames {
+		results[i] = resourceapi.DeviceRequestAllocationResult{
+			Driver: driverName,
+			Pool:   "some-pool",
+			Device: "some-device",
 		}
 	}
+
 	createdClaim.Status = resourceapi.ResourceClaimStatus{
-		DriverName: "controller",
 		ReservedFor: []resourceapi.ResourceClaimConsumerReference{
 			{Resource: "pods", Name: podName, UID: createdPod.UID},
 		},
 		Allocation: &resourceapi.AllocationResult{
-			ResourceHandles: resourceHandlers,
+			Devices: resourceapi.DeviceAllocationResult{
+				Results: results,
+				Config: []resourceapi.DeviceAllocationConfiguration{{
+					DeviceConfiguration: resourceapi.DeviceConfiguration{
+						Opaque: &resourceapi.OpaqueDeviceConfiguration{
+							Driver:     driverName,
+							Parameters: runtime.RawExtension{Raw: []byte(`{"EnvVars":{"DRA_PARAM1":"PARAM1_VALUE"}}`)},
+						},
+					},
+				}},
+			},
 		},
 	}
 	_, err = clientSet.ResourceV1alpha3().ResourceClaims(namespace).UpdateStatus(ctx, createdClaim, metav1.UpdateOptions{})
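
Aside (not part of the commit): the opaque parameters set above travel as raw JSON inside a runtime.RawExtension. A consumer on the driver side would typically unmarshal them back into its own parameter type; the actual test driver's handling is not shown in this diff. A minimal sketch with a hypothetical envVarParameters struct, assuming the test file's imports plus encoding/json and fmt:

// envVarParameters mirrors the JSON written into OpaqueDeviceConfiguration
// above; hypothetical type, defined only for this sketch.
type envVarParameters struct {
	EnvVars map[string]string `json:"EnvVars"`
}

// decodeOpaqueParameters extracts one driver's opaque config from an allocated
// claim. Sketch only.
func decodeOpaqueParameters(claim *resourceapi.ResourceClaim, driver string) (*envVarParameters, error) {
	if claim.Status.Allocation == nil {
		return nil, fmt.Errorf("claim %s is not allocated", claim.Name)
	}
	for _, cfg := range claim.Status.Allocation.Devices.Config {
		if cfg.Opaque == nil || cfg.Opaque.Driver != driver {
			continue
		}
		var params envVarParameters
		if err := json.Unmarshal(cfg.Opaque.Parameters.Raw, &params); err != nil {
			return nil, err
		}
		return &params, nil
	}
	return nil, fmt.Errorf("no opaque config for driver %s", driver)
}
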
@@ -665,10 +681,12 @@ func createTestResourceSlice(ctx context.Context, clientSet kubernetes.Interface
 		ObjectMeta: metav1.ObjectMeta{
 			Name: nodeName,
 		},
-		NodeName:   nodeName,
-		DriverName: driverName,
-		ResourceModel: resourceapi.ResourceModel{
-			NamedResources: &resourceapi.NamedResourcesResources{},
+		Spec: resourceapi.ResourceSliceSpec{
+			NodeName: nodeName,
+			Driver:   driverName,
+			Pool: resourceapi.ResourcePool{
+				Name: nodeName,
+			},
 		},
 	}
 