Commit 1841742

DRA E2E node: adapt to v1alpha3 API
1 parent 2bce123 commit 1841742

1 file changed: test/e2e_node/dra_test.go (39 additions, 21 deletions)
@@ -41,6 +41,7 @@ import (
 	resourceapi "k8s.io/api/resource/v1alpha3"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/klog/v2"
 	draplugin "k8s.io/kubernetes/pkg/kubelet/cm/dra/plugin"
@@ -74,7 +75,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
 			// When plugin and kubelet get killed at the end of the tests, they leave ResourceSlices behind.
 			// Perhaps garbage collection would eventually remove them (not sure how the node instance
 			// is managed), but this could take time. Let's clean up explicitly.
-			framework.ExpectNoError(f.ClientSet.ResourceV1alpha2().ResourceSlices().DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{}))
+			framework.ExpectNoError(f.ClientSet.ResourceV1alpha3().ResourceSlices().DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{}))
 		})
 	})
 
@@ -562,7 +563,7 @@ func newKubeletPlugin(ctx context.Context, clientSet kubernetes.Interface, nodeN
 	ginkgo.DeferCleanup(func(ctx context.Context) {
 		// kubelet should do this eventually, but better make sure.
 		// A separate test checks this explicitly.
-		framework.ExpectNoError(clientSet.ResourceV1alpha3().ResourceSlices().DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{FieldSelector: "driverName=" + driverName}))
+		framework.ExpectNoError(clientSet.ResourceV1alpha3().ResourceSlices().DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{FieldSelector: resourceapi.ResourceSliceSelectorDriver + "=" + driverName}))
 	})
 	ginkgo.DeferCleanup(plugin.Stop)
 
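For reference (not part of this commit): a minimal sketch of querying ResourceSlices for a single driver with the typed v1alpha3 field-selector constant used in the cleanup above. The clientset call and the constant come from the diff; the helper function and its name are illustrative only.

// Illustrative helper, not from the commit. Assumed imports: context,
// resourceapi "k8s.io/api/resource/v1alpha3", metav1, "k8s.io/client-go/kubernetes".
func listSlicesForDriver(ctx context.Context, clientSet kubernetes.Interface, driverName string) (*resourceapi.ResourceSliceList, error) {
	// resourceapi.ResourceSliceSelectorDriver replaces the previously hard-coded
	// "driverName=" prefix, so the selector stays in sync with the API definition.
	return clientSet.ResourceV1alpha3().ResourceSlices().List(ctx, metav1.ListOptions{
		FieldSelector: resourceapi.ResourceSliceSelectorDriver + "=" + driverName,
	})
}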
@@ -573,18 +574,17 @@
 // NOTE: as scheduler and controller manager are not running by the Node e2e,
 // the objects must contain all required data to be processed correctly by the API server
 // and placed on the node without involving the scheduler and the DRA controller
-func createTestObjects(ctx context.Context, clientSet kubernetes.Interface, nodename, namespace, className, claimName, podName string, deferPodDeletion bool, pluginNames []string) *v1.Pod {
-	// ResourceClass
-	class := &resourceapi.ResourceClass{
+func createTestObjects(ctx context.Context, clientSet kubernetes.Interface, nodename, namespace, className, claimName, podName string, deferPodDeletion bool, driverNames []string) *v1.Pod {
+	// DeviceClass
+	class := &resourceapi.DeviceClass{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: className,
 		},
-		DriverName: "controller",
 	}
-	_, err := clientSet.ResourceV1alpha3().ResourceClasses().Create(ctx, class, metav1.CreateOptions{})
+	_, err := clientSet.ResourceV1alpha3().DeviceClasses().Create(ctx, class, metav1.CreateOptions{})
 	framework.ExpectNoError(err)
 
-	ginkgo.DeferCleanup(clientSet.ResourceV1alpha3().ResourceClasses().Delete, className, metav1.DeleteOptions{})
+	ginkgo.DeferCleanup(clientSet.ResourceV1alpha3().DeviceClasses().Delete, className, metav1.DeleteOptions{})
 
 	// ResourceClaim
 	podClaimName := "resource-claim"
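For reference (not part of this commit): a minimal sketch of the class object after this migration. Under v1alpha3 a DeviceClass needs only a name here; the old ResourceClass DriverName field has no counterpart in this test. The class name is a placeholder.

// Illustrative only; "example-class" is a placeholder name.
// Assumed imports: resourceapi "k8s.io/api/resource/v1alpha3", metav1.
class := &resourceapi.DeviceClass{
	ObjectMeta: metav1.ObjectMeta{
		Name: "example-class",
	},
}
_, err := clientSet.ResourceV1alpha3().DeviceClasses().Create(ctx, class, metav1.CreateOptions{})
framework.ExpectNoError(err)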
@@ -593,7 +593,12 @@ func createTestObjects(ctx context.Context, clientSet kubernetes.Interface, node
 			Name: claimName,
 		},
 		Spec: resourceapi.ResourceClaimSpec{
-			ResourceClassName: className,
+			Devices: resourceapi.DeviceClaim{
+				Requests: []resourceapi.DeviceRequest{{
+					Name:            "my-request",
+					DeviceClassName: className,
+				}},
+			},
 		},
 	}
 	createdClaim, err := clientSet.ResourceV1alpha3().ResourceClaims(namespace).Create(ctx, claim, metav1.CreateOptions{})
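For reference (not part of this commit): a minimal sketch of a complete v1alpha3 ResourceClaim built the same way as in the hunk above, with one named device request pointing at a DeviceClass. All literal names are placeholders.

// Illustrative only. ResourceClaimSpec.ResourceClassName is gone in v1alpha3;
// a claim now lists named device requests, each referencing a DeviceClass.
// Assumed imports: resourceapi "k8s.io/api/resource/v1alpha3", metav1.
claim := &resourceapi.ResourceClaim{
	ObjectMeta: metav1.ObjectMeta{Name: "example-claim"}, // placeholder
	Spec: resourceapi.ResourceClaimSpec{
		Devices: resourceapi.DeviceClaim{
			Requests: []resourceapi.DeviceRequest{{
				Name:            "my-request",    // name of this request within the claim
				DeviceClassName: "example-class", // placeholder DeviceClass name
			}},
		},
	},
}
_, err := clientSet.ResourceV1alpha3().ResourceClaims("example-namespace").Create(ctx, claim, metav1.CreateOptions{})
framework.ExpectNoError(err)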
@@ -637,21 +642,32 @@ func createTestObjects(ctx context.Context, clientSet kubernetes.Interface, node
 	}
 
 	// Update claim status: set ReservedFor and AllocationResult
-	// NOTE: This is usually done by the DRA controller
-	resourceHandlers := make([]resourceapi.ResourceHandle, len(pluginNames))
-	for i, pluginName := range pluginNames {
-		resourceHandlers[i] = resourceapi.ResourceHandle{
-			DriverName: pluginName,
-			Data:       "{\"EnvVars\":{\"DRA_PARAM1\":\"PARAM1_VALUE\"},\"NodeName\":\"\"}",
+	// NOTE: This is usually done by the DRA controller or the scheduler.
+	results := make([]resourceapi.DeviceRequestAllocationResult, len(driverNames))
+	for i, driverName := range driverNames {
+		results[i] = resourceapi.DeviceRequestAllocationResult{
+			Driver: driverName,
+			Pool:   "some-pool",
+			Device: "some-device",
 		}
 	}
+
 	createdClaim.Status = resourceapi.ResourceClaimStatus{
-		DriverName: "controller",
 		ReservedFor: []resourceapi.ResourceClaimConsumerReference{
 			{Resource: "pods", Name: podName, UID: createdPod.UID},
 		},
 		Allocation: &resourceapi.AllocationResult{
-			ResourceHandles: resourceHandlers,
+			Devices: resourceapi.DeviceAllocationResult{
+				Results: results,
+				Config: []resourceapi.DeviceAllocationConfiguration{{
+					DeviceConfiguration: resourceapi.DeviceConfiguration{
+						Opaque: &resourceapi.OpaqueDeviceConfiguration{
+							Driver:     driverName,
+							Parameters: runtime.RawExtension{Raw: []byte(`{"EnvVars":{"DRA_PARAM1":"PARAM1_VALUE"}}`)},
+						},
+					},
+				}},
+			},
 		},
 	}
 	_, err = clientSet.ResourceV1alpha3().ResourceClaims(namespace).UpdateStatus(ctx, createdClaim, metav1.UpdateOptions{})
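For reference (not part of this commit): a minimal sketch of how a consumer on the driver side could read the opaque configuration that the status update above stores as raw JSON. The parameter struct and helper are hypothetical; only the EnvVars shape mirrors the test payload.

// Illustrative only. Assumed imports: encoding/json,
// resourceapi "k8s.io/api/resource/v1alpha3".
type testDriverParams struct { // hypothetical mirror of {"EnvVars":{"DRA_PARAM1":"PARAM1_VALUE"}}
	EnvVars map[string]string `json:"EnvVars"`
}

func decodeOpaqueParams(cfg *resourceapi.OpaqueDeviceConfiguration) (testDriverParams, error) {
	var params testDriverParams
	// OpaqueDeviceConfiguration.Parameters is a runtime.RawExtension, so the raw
	// JSON bytes can be unmarshalled directly into the driver's own parameter type.
	err := json.Unmarshal(cfg.Parameters.Raw, &params)
	return params, err
}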
@@ -665,10 +681,12 @@ func createTestResourceSlice(ctx context.Context, clientSet kubernetes.Interface
 		ObjectMeta: metav1.ObjectMeta{
 			Name: nodeName,
 		},
-		NodeName:   nodeName,
-		DriverName: driverName,
-		ResourceModel: resourceapi.ResourceModel{
-			NamedResources: &resourceapi.NamedResourcesResources{},
+		Spec: resourceapi.ResourceSliceSpec{
+			NodeName: nodeName,
+			Driver:   driverName,
+			Pool: resourceapi.ResourcePool{
+				Name: nodeName,
+			},
 		},
 	}
 
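For reference (not part of this commit): a minimal sketch of publishing a node-local ResourceSlice with the Spec wrapper introduced in the last hunk, with the pool named after the node exactly as the test does. The helper function and its name are illustrative only.

// Illustrative helper, not from the commit. Assumed imports: context,
// resourceapi "k8s.io/api/resource/v1alpha3", metav1, "k8s.io/client-go/kubernetes".
func createNodeResourceSlice(ctx context.Context, clientSet kubernetes.Interface, nodeName, driverName string) error {
	slice := &resourceapi.ResourceSlice{
		ObjectMeta: metav1.ObjectMeta{Name: nodeName},
		// In v1alpha3 the node, driver and pool information lives under Spec
		// instead of being top-level fields on the slice.
		Spec: resourceapi.ResourceSliceSpec{
			NodeName: nodeName,
			Driver:   driverName,
			Pool: resourceapi.ResourcePool{
				Name: nodeName, // one pool per node, named after the node, as in the test
			},
		},
	}
	_, err := clientSet.ResourceV1alpha3().ResourceSlices().Create(ctx, slice, metav1.CreateOptions{})
	return err
}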