From 0879aa6e9a04fc64acb36fe2be46ee336bd9b25a Mon Sep 17 00:00:00 2001 From: grokspawn Date: Tue, 15 Jul 2025 10:29:05 -0500 Subject: [PATCH 1/3] initial vibe --- go.mod | 1 + go.sum | 2 + internal/catalogd/graphql/discovery_test.go | 498 +++++++++++++ internal/catalogd/graphql/graphql.go | 692 +++++++++++++++++++ internal/catalogd/graphql/graphql_test.go | 333 +++++++++ internal/catalogd/graphql/sample-queries.txt | 412 +++++++++++ 6 files changed, 1938 insertions(+) create mode 100644 internal/catalogd/graphql/discovery_test.go create mode 100644 internal/catalogd/graphql/graphql.go create mode 100644 internal/catalogd/graphql/graphql_test.go create mode 100644 internal/catalogd/graphql/sample-queries.txt diff --git a/go.mod b/go.mod index 605549045..2fd0702ca 100644 --- a/go.mod +++ b/go.mod @@ -132,6 +132,7 @@ require ( github.com/gorilla/mux v1.8.1 // indirect github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect github.com/gosuri/uitable v0.0.4 // indirect + github.com/graphql-go/graphql v0.8.1 // indirect github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.0 // indirect github.com/h2non/filetype v1.1.3 // indirect diff --git a/go.sum b/go.sum index c6c4f5c48..65ba6fb3b 100644 --- a/go.sum +++ b/go.sum @@ -260,6 +260,8 @@ github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5T github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY= github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= +github.com/graphql-go/graphql v0.8.1 h1:p7/Ou/WpmulocJeEx7wjQy611rtXGQaAcXGqanuMMgc= +github.com/graphql-go/graphql v0.8.1/go.mod h1:nKiHzRM0qopJEwCITUuIsxk9PlVlwIiiI8pnJEhordQ= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 
h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20210315223345-82c243799c99 h1:JYghRBlGCZyCF2wNUJ8W0cwaQdtpcssJ4CgC406g+WU= diff --git a/internal/catalogd/graphql/discovery_test.go b/internal/catalogd/graphql/discovery_test.go new file mode 100644 index 000000000..f441dcc29 --- /dev/null +++ b/internal/catalogd/graphql/discovery_test.go @@ -0,0 +1,498 @@ +package graphql + +import ( + "testing" + + "github.com/operator-framework/operator-registry/alpha/declcfg" +) + +func TestDiscoverSchemaFromMetas_CoreLogic(t *testing.T) { + // Create test metas simulating real catalog data + testMetas := []*declcfg.Meta{ + { + Schema: declcfg.SchemaPackage, + Package: "test-package", + Name: "test-package", + Blob: []byte(`{ + "schema": "olm.package", + "name": "test-package", + "defaultChannel": "stable", + "icon": { + "base64data": "...", + "mediatype": "image/svg+xml" + }, + "description": "A test package" + }`), + }, + { + Schema: declcfg.SchemaChannel, + Package: "test-package", + Name: "stable", + Blob: []byte(`{ + "schema": "olm.channel", + "name": "stable", + "package": "test-package", + "entries": [ + {"name": "test-package.v1.0.0"}, + {"name": "test-package.v1.1.0", "replaces": "test-package.v1.0.0"} + ] + }`), + }, + { + Schema: declcfg.SchemaBundle, + Package: "test-package", + Name: "test-package.v1.0.0", + Blob: []byte(`{ + "schema": "olm.bundle", + "name": "test-package.v1.0.0", + "package": "test-package", + "image": "registry.io/test-package@sha256:abc123", + "properties": [ + { + "type": "olm.package", + "value": { + "packageName": "test-package", + "version": "1.0.0" + } + }, + { + "type": "olm.gvk", + "value": { + "group": "example.com", + "version": "v1", + "kind": "TestResource" + } + } + ], + "relatedImages": [ + { + "name": "operator", + "image": 
"registry.io/test-package@sha256:abc123" + } + ] + }`), + }, + } + + // Test schema discovery + catalogSchema, err := DiscoverSchemaFromMetas(testMetas) + if err != nil { + t.Fatalf("Failed to discover schema: %v", err) + } + + // Validate discovered schemas + if len(catalogSchema.Schemas) != 3 { + t.Errorf("Expected 3 schemas, got %d", len(catalogSchema.Schemas)) + } + + // Test package schema + packageSchema, ok := catalogSchema.Schemas[declcfg.SchemaPackage] + if !ok { + t.Error("Package schema not discovered") + } else { + if packageSchema.TotalObjects != 1 { + t.Errorf("Expected 1 package object, got %d", packageSchema.TotalObjects) + } + if len(packageSchema.Fields) == 0 { + t.Error("No fields discovered for package schema") + } + + // Check for expected fields + expectedFields := []string{"name", "defaultChannel", "icon", "description", "schema"} + for _, field := range expectedFields { + graphqlField := remapFieldName(field) + if _, exists := packageSchema.Fields[graphqlField]; !exists { + t.Errorf("Expected field %s (mapped to %s) not found in package schema", field, graphqlField) + } + } + } + + // Test bundle schema with properties + bundleSchema, ok := catalogSchema.Schemas[declcfg.SchemaBundle] + if !ok { + t.Error("Bundle schema not discovered") + } else { + if bundleSchema.TotalObjects != 1 { + t.Errorf("Expected 1 bundle object, got %d", bundleSchema.TotalObjects) + } + + // Check property types discovery + if len(bundleSchema.PropertyTypes) == 0 { + t.Error("No property types discovered for bundle schema") + } + + // Check for specific property types + if olmPackage, exists := bundleSchema.PropertyTypes["olm.package"]; !exists { + t.Error("olm.package property type not discovered") + } else { + expectedPropertyFields := []string{"packageName", "version"} + for _, field := range expectedPropertyFields { + graphqlField := remapFieldName(field) + if _, exists := olmPackage[graphqlField]; !exists { + t.Errorf("Expected property field %s not found in 
olm.package", graphqlField) + } + } + } + + if olmGvk, exists := bundleSchema.PropertyTypes["olm.gvk"]; !exists { + t.Error("olm.gvk property type not discovered") + } else { + expectedGvkFields := []string{"group", "version", "kind"} + for _, field := range expectedGvkFields { + graphqlField := remapFieldName(field) + if _, exists := olmGvk[graphqlField]; !exists { + t.Errorf("Expected GVK field %s not found in olm.gvk", graphqlField) + } + } + } + } + + // Test channel schema + channelSchema, ok := catalogSchema.Schemas[declcfg.SchemaChannel] + if !ok { + t.Error("Channel schema not discovered") + } else { + if channelSchema.TotalObjects != 1 { + t.Errorf("Expected 1 channel object, got %d", channelSchema.TotalObjects) + } + } +} + +func TestFieldNameRemapping_EdgeCases(t *testing.T) { + testCases := []struct { + input string + expected string + }{ + {"name", "name"}, + {"package-name", "packageName"}, + {"default_channel", "defaultChannel"}, + {"related-images", "relatedImages"}, + {"", "value"}, + {"123invalid", "field_123invalid"}, + {"my.field.name", "myFieldName"}, + {"CamelCase", "camelCase"}, + {"UPPERCASE", "uppercase"}, + {"mixed_case-field.name", "mixedCaseFieldName"}, + {"spec.template.spec.containers", "specTemplateSpecContainers"}, + {"metadata.annotations.description", "metadataAnnotationsDescription"}, + {"operators.operatorframework.io/bundle.channels.v1", "operatorsOperatorframeworkIoBundleChannelsV1"}, + {"---", "field_"}, + {"123", "field_123"}, + {"field@#$%", "fieldField"}, + } + + for _, tc := range testCases { + result := remapFieldName(tc.input) + if result != tc.expected { + t.Errorf("remapFieldName(%q) = %q, expected %q", tc.input, result, tc.expected) + } + } +} + +func TestSanitizeTypeName_EdgeCases(t *testing.T) { + testCases := []struct { + input string + expected string + }{ + {"olm.package", "OlmPackage"}, + {"olm.gvk", "OlmGvk"}, + {"some-type", "SomeType"}, + {"complex.type-name_here", "ComplexTypeNameHere"}, + {"", "Unknown"}, + 
{"123invalid", "Invalid"}, + {"operators.operatorframework.io/bundle.channels.v1", "OperatorsOperatorframeworkIoBundleChannelsV1"}, + {"@#$%", "Unknown"}, + {"_____", "Unknown"}, + {"ABC", "Abc"}, + {"lowercase", "Lowercase"}, + } + + for _, tc := range testCases { + result := sanitizeTypeName(tc.input) + if result != tc.expected { + t.Errorf("sanitizeTypeName(%q) = %q, expected %q", tc.input, result, tc.expected) + } + } +} + +func TestAnalyzeJSONObject_FieldTypes(t *testing.T) { + testObj := map[string]interface{}{ + "name": "test-package", + "version": "1.0.0", + "count": 42, + "active": true, + "tags": []interface{}{"tag1", "tag2"}, + "numbers": []interface{}{1, 2, 3}, + "nested": map[string]interface{}{"key": "value"}, + "nullField": nil, + "emptyArray": []interface{}{}, + "floatValue": 3.14, + "mixedArray": []interface{}{"string", 123, true}, + } + + info := &SchemaInfo{ + Fields: make(map[string]*FieldInfo), + } + + analyzeJSONObject(testObj, info) + + // Check that all fields were discovered + expectedFieldCount := len(testObj) + if len(info.Fields) != expectedFieldCount { + t.Errorf("Expected %d fields discovered, got %d", expectedFieldCount, len(info.Fields)) + } + + // Check specific field types + testField := func(origName string, shouldBeArray bool) { + graphqlField := remapFieldName(origName) + fieldInfo, exists := info.Fields[graphqlField] + if !exists { + t.Errorf("Field %s (mapped to %s) not discovered", origName, graphqlField) + return + } + + if fieldInfo.IsArray != shouldBeArray { + t.Errorf("Field %s array status: expected %v, got %v", graphqlField, shouldBeArray, fieldInfo.IsArray) + } + + if len(fieldInfo.SampleValues) == 0 { + t.Errorf("No sample values recorded for field %s", graphqlField) + } + } + + testField("name", false) + testField("count", false) + testField("active", false) + testField("tags", true) + testField("numbers", true) + testField("emptyArray", true) +} + +func TestBundlePropertiesAnalysis_ComprehensiveTypes(t *testing.T) { 
+ bundleObj := map[string]interface{}{ + "name": "test-bundle", + "package": "test-package", + "properties": []interface{}{ + map[string]interface{}{ + "type": "olm.package", + "value": map[string]interface{}{ + "packageName": "test-package", + "version": "1.0.0", + }, + }, + map[string]interface{}{ + "type": "olm.gvk", + "value": map[string]interface{}{ + "group": "example.com", + "version": "v1", + "kind": "TestResource", + }, + }, + map[string]interface{}{ + "type": "olm.csv.metadata", + "value": map[string]interface{}{ + "name": "test-operator", + "namespace": "test-namespace", + "annotations": map[string]interface{}{ + "description": "A test operator", + }, + }, + }, + map[string]interface{}{ + "type": "olm.bundle.object", + "value": map[string]interface{}{ + "ref": "objects/test.yaml", + "data": map[string]interface{}{ + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": map[string]interface{}{ + "name": "config", + }, + }, + }, + }, + }, + } + + info := &SchemaInfo{ + PropertyTypes: make(map[string]map[string]*FieldInfo), + } + + analyzeBundleProperties(bundleObj, info) + + // Check that property types were discovered + expectedPropertyTypes := []string{"olm.package", "olm.gvk", "olm.csv.metadata", "olm.bundle.object"} + for _, propType := range expectedPropertyTypes { + if _, exists := info.PropertyTypes[propType]; !exists { + t.Errorf("Property type %s not discovered", propType) + } + } + + // Check olm.package fields + if olmPackage, exists := info.PropertyTypes["olm.package"]; exists { + expectedFields := []string{"packageName", "version"} + for _, field := range expectedFields { + if _, exists := olmPackage[field]; !exists { + t.Errorf("Field %s not found in olm.package property type", field) + } + } + } + + // Check olm.gvk fields + if olmGvk, exists := info.PropertyTypes["olm.gvk"]; exists { + expectedFields := []string{"group", "version", "kind"} + for _, field := range expectedFields { + if _, exists := olmGvk[field]; !exists { + 
t.Errorf("Field %s not found in olm.gvk property type", field) + } + } + } + + // Check that nested objects are handled (annotations in csv.metadata) + if csvMetadata, exists := info.PropertyTypes["olm.csv.metadata"]; exists { + expectedFields := []string{"name", "namespace", "annotations"} + for _, field := range expectedFields { + if _, exists := csvMetadata[field]; !exists { + t.Errorf("Field %s not found in olm.csv.metadata property type", field) + } + } + } + + // Check bundle object type + if bundleObject, exists := info.PropertyTypes["olm.bundle.object"]; exists { + expectedFields := []string{"ref", "data"} + for _, field := range expectedFields { + if _, exists := bundleObject[field]; !exists { + t.Errorf("Field %s not found in olm.bundle.object property type", field) + } + } + } +} + +func TestSchemaDiscovery_RealWorldExample(t *testing.T) { + // Test with more realistic catalog data + packageMeta := &declcfg.Meta{ + Schema: declcfg.SchemaPackage, + Package: "nginx-ingress-operator", + Name: "nginx-ingress-operator", + Blob: []byte(`{ + "defaultChannel": "alpha", + "icon": { + "base64data": "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8/5+hHgAHggJ/PchI7wAAAABJRU5ErkJggg==", + "mediatype": "image/png" + }, + "name": "nginx-ingress-operator", + "schema": "olm.package" + }`), + } + + channelMeta := &declcfg.Meta{ + Schema: declcfg.SchemaChannel, + Package: "nginx-ingress-operator", + Name: "alpha", + Blob: []byte(`{ + "entries": [ + {"name": "nginx-ingress-operator.v0.0.1"}, + {"name": "nginx-ingress-operator.v0.0.2", "replaces": "nginx-ingress-operator.v0.0.1"} + ], + "name": "alpha", + "package": "nginx-ingress-operator", + "schema": "olm.channel" + }`), + } + + bundleMeta := &declcfg.Meta{ + Schema: declcfg.SchemaBundle, + Package: "nginx-ingress-operator", + Name: "nginx-ingress-operator.v0.0.2", + Blob: []byte(`{ + "image": "quay.io/operatorhubio/nginx-ingress-operator@sha256:abc123", + "name": "nginx-ingress-operator.v0.0.2", + "package": 
"nginx-ingress-operator", + "properties": [ + { + "type": "olm.package", + "value": { + "packageName": "nginx-ingress-operator", + "version": "0.0.2" + } + }, + { + "type": "olm.gvk", + "value": { + "group": "k8s.nginx.org", + "kind": "NginxIngress", + "version": "v1" + } + }, + { + "type": "olm.bundle.mediatype", + "value": "registry+v1" + } + ], + "relatedImages": [ + { + "image": "quay.io/operatorhubio/nginx-ingress-operator@sha256:abc123", + "name": "operator" + } + ], + "schema": "olm.bundle" + }`), + } + + testMetas := []*declcfg.Meta{packageMeta, channelMeta, bundleMeta} + + catalogSchema, err := DiscoverSchemaFromMetas(testMetas) + if err != nil { + t.Fatalf("Failed to discover schema: %v", err) + } + + // Validate the results + if len(catalogSchema.Schemas) != 3 { + t.Errorf("Expected 3 schemas, got %d", len(catalogSchema.Schemas)) + } + + // Check bundle property discovery + bundleSchema := catalogSchema.Schemas[declcfg.SchemaBundle] + if bundleSchema == nil { + t.Fatal("Bundle schema not found") + } + + expectedPropertyTypes := map[string][]string{ + "olm.package": {"packageName", "version"}, + "olm.gvk": {"group", "kind", "version"}, + "olm.bundle.mediatype": {}, // This is a string value, no nested fields + } + + for propType, expectedFields := range expectedPropertyTypes { + if propFields, exists := bundleSchema.PropertyTypes[propType]; exists { + for _, expectedField := range expectedFields { + if _, fieldExists := propFields[expectedField]; !fieldExists { + t.Errorf("Expected field %s not found in property type %s", expectedField, propType) + } + } + } else if len(expectedFields) > 0 { + // Only error if we expected fields (mediatype is a string, so no fields expected) + t.Errorf("Property type %s not discovered", propType) + } + } + + // Validate that complex fields are properly mapped + packageSchema := catalogSchema.Schemas[declcfg.SchemaPackage] + if packageSchema == nil { + t.Fatal("Package schema not found") + } + + // Check that icon field 
exists (it's a complex object) + if _, exists := packageSchema.Fields["icon"]; !exists { + t.Error("Icon field not discovered in package schema") + } + + // Validate total object counts + if packageSchema.TotalObjects != 1 { + t.Errorf("Expected 1 package, got %d", packageSchema.TotalObjects) + } + if bundleSchema.TotalObjects != 1 { + t.Errorf("Expected 1 bundle, got %d", bundleSchema.TotalObjects) + } +} diff --git a/internal/catalogd/graphql/graphql.go b/internal/catalogd/graphql/graphql.go new file mode 100644 index 000000000..9034fdd15 --- /dev/null +++ b/internal/catalogd/graphql/graphql.go @@ -0,0 +1,692 @@ +package graphql + +import ( + "context" + "encoding/json" + "fmt" + "io/fs" + "net/http" + "reflect" + "regexp" + "strings" + + "github.com/graphql-go/graphql" + "github.com/operator-framework/operator-registry/alpha/declcfg" +) + +var schema graphql.Schema + +// FieldInfo represents discovered field information +type FieldInfo struct { + Name string + GraphQLType graphql.Type + JSONType reflect.Kind + IsArray bool + SampleValues []interface{} +} + +// SchemaInfo holds discovered schema information +type SchemaInfo struct { + Fields map[string]*FieldInfo + PropertyTypes map[string]map[string]*FieldInfo // For bundle properties: type -> field -> info + TotalObjects int + SampleObject map[string]interface{} +} + +// CatalogSchema holds the complete discovered schema +type CatalogSchema struct { + Schemas map[string]*SchemaInfo // schema name -> info +} + +// DynamicSchema holds the generated GraphQL schema and metadata +type DynamicSchema struct { + Schema graphql.Schema + CatalogSchema *CatalogSchema + MetasBySchema map[string][]*declcfg.Meta // For resolvers +} + +// remapFieldName converts field names to valid GraphQL camelCase identifiers +func remapFieldName(name string) string { + // Handle empty names + if name == "" { + return "value" + } + + // Replace invalid characters with underscores + re := regexp.MustCompile(`[^a-zA-Z0-9_]`) + clean := 
re.ReplaceAllString(name, "_") + + // Split on underscores and camelCase + parts := strings.Split(clean, "_") + result := "" + for i, part := range parts { + if part == "" { + continue + } + if i == 0 { + result = strings.ToLower(part) + } else { + result += strings.Title(strings.ToLower(part)) + } + } + + // Ensure it starts with a letter + if result == "" || !regexp.MustCompile(`^[a-zA-Z]`).MatchString(result) { + result = "field_" + result + } + + return result +} + +// jsonTypeToGraphQL maps JSON types to GraphQL types +func jsonTypeToGraphQL(jsonType reflect.Kind, isArray bool) graphql.Type { + var baseType graphql.Type + + switch jsonType { + case reflect.String: + baseType = graphql.String + case reflect.Bool: + baseType = graphql.Boolean + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + baseType = graphql.Int + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + baseType = graphql.Int + case reflect.Float32, reflect.Float64: + baseType = graphql.Float + default: + // For complex types, use String as fallback (JSON serialized) + baseType = graphql.String + } + + if isArray { + return graphql.NewList(baseType) + } + return baseType +} + +// analyzeJSONObject analyzes a JSON object and extracts field information +func analyzeJSONObject(obj map[string]interface{}, info *SchemaInfo) { + if info.Fields == nil { + info.Fields = make(map[string]*FieldInfo) + } + + for key, value := range obj { + fieldName := remapFieldName(key) + + // Determine type and array status + isArray := false + var jsonType reflect.Kind + var sampleValue interface{} = value + + if value == nil { + jsonType = reflect.String // Default for null values + } else { + valueType := reflect.TypeOf(value) + if valueType.Kind() == reflect.Slice { + isArray = true + slice := reflect.ValueOf(value) + if slice.Len() > 0 { + firstElem := slice.Index(0).Interface() + if firstElem != nil { + jsonType = reflect.TypeOf(firstElem).Kind() + 
sampleValue = firstElem + } else { + jsonType = reflect.String + } + } else { + jsonType = reflect.String + } + } else { + jsonType = valueType.Kind() + } + } + + // Update or create field info + if existing, ok := info.Fields[fieldName]; ok { + // Add sample value if not already present + existing.SampleValues = appendUnique(existing.SampleValues, sampleValue) + } else { + info.Fields[fieldName] = &FieldInfo{ + Name: fieldName, + GraphQLType: jsonTypeToGraphQL(jsonType, isArray), + JSONType: jsonType, + IsArray: isArray, + SampleValues: []interface{}{sampleValue}, + } + } + } +} + +// analyzeBundleProperties analyzes bundle properties for union type creation +func analyzeBundleProperties(obj map[string]interface{}, info *SchemaInfo) { + if info.PropertyTypes == nil { + info.PropertyTypes = make(map[string]map[string]*FieldInfo) + } + + properties, ok := obj["properties"] + if !ok { + return + } + + propsSlice, ok := properties.([]interface{}) + if !ok { + return + } + + for _, prop := range propsSlice { + propObj, ok := prop.(map[string]interface{}) + if !ok { + continue + } + + propType, ok := propObj["type"].(string) + if !ok { + continue + } + + value, ok := propObj["value"] + if !ok { + continue + } + + // Analyze the value structure for this property type + if valueObj, ok := value.(map[string]interface{}); ok { + if info.PropertyTypes[propType] == nil { + info.PropertyTypes[propType] = make(map[string]*FieldInfo) + } + + for key, val := range valueObj { + fieldName := remapFieldName(key) + isArray := false + var jsonType reflect.Kind + + if val == nil { + jsonType = reflect.String + } else { + valType := reflect.TypeOf(val) + if valType.Kind() == reflect.Slice { + isArray = true + slice := reflect.ValueOf(val) + if slice.Len() > 0 { + firstElem := slice.Index(0).Interface() + if firstElem != nil { + jsonType = reflect.TypeOf(firstElem).Kind() + } else { + jsonType = reflect.String + } + } else { + jsonType = reflect.String + } + } else { + jsonType = 
valType.Kind() + } + } + + if existing, ok := info.PropertyTypes[propType][fieldName]; ok { + existing.SampleValues = appendUnique(existing.SampleValues, val) + } else { + info.PropertyTypes[propType][fieldName] = &FieldInfo{ + Name: fieldName, + GraphQLType: jsonTypeToGraphQL(jsonType, isArray), + JSONType: jsonType, + IsArray: isArray, + SampleValues: []interface{}{val}, + } + } + } + } + } +} + +// appendUnique adds a value to slice if not already present +func appendUnique(slice []interface{}, value interface{}) []interface{} { + for _, existing := range slice { + if reflect.DeepEqual(existing, value) { + return slice + } + } + return append(slice, value) +} + +// DiscoverSchemaFromMetas analyzes Meta objects to discover schema structure +func DiscoverSchemaFromMetas(metas []*declcfg.Meta) (*CatalogSchema, error) { + catalogSchema := &CatalogSchema{ + Schemas: make(map[string]*SchemaInfo), + } + + // Process each meta object + for _, meta := range metas { + if meta.Schema == "" { + continue + } + + // Ensure schema info exists + if catalogSchema.Schemas[meta.Schema] == nil { + catalogSchema.Schemas[meta.Schema] = &SchemaInfo{ + Fields: make(map[string]*FieldInfo), + PropertyTypes: make(map[string]map[string]*FieldInfo), + TotalObjects: 0, + } + } + + info := catalogSchema.Schemas[meta.Schema] + info.TotalObjects++ + + // Parse the JSON blob + var obj map[string]interface{} + if err := json.Unmarshal(meta.Blob, &obj); err != nil { + continue // Skip malformed objects + } + + // Store a sample object for reference + if info.SampleObject == nil { + info.SampleObject = obj + } + + // Analyze general fields + analyzeJSONObject(obj, info) + + // Special handling for bundle properties + if meta.Schema == declcfg.SchemaBundle { + analyzeBundleProperties(obj, info) + } + } + + return catalogSchema, nil +} + +// buildGraphQLObjectType creates a GraphQL object type from discovered field info +func buildGraphQLObjectType(schemaName string, info *SchemaInfo) *graphql.Object 
{ + fields := graphql.Fields{} + + // Add discovered fields + for fieldName, fieldInfo := range info.Fields { + fields[fieldName] = &graphql.Field{ + Type: fieldInfo.GraphQLType, + Resolve: func(p graphql.ResolveParams) (interface{}, error) { + if source, ok := p.Source.(map[string]interface{}); ok { + // Find the original JSON key for this GraphQL field + for origKey, value := range source { + if remapFieldName(origKey) == fieldName { + return value, nil + } + } + } + return nil, nil + }, + } + } + + // Special handling for bundle properties + if schemaName == declcfg.SchemaBundle && len(info.PropertyTypes) > 0 { + fields["properties"] = &graphql.Field{ + Type: graphql.NewList(createBundlePropertyType(info.PropertyTypes)), + Resolve: func(p graphql.ResolveParams) (interface{}, error) { + if source, ok := p.Source.(map[string]interface{}); ok { + if props, ok := source["properties"]; ok { + return props, nil + } + } + return nil, nil + }, + } + } + + return graphql.NewObject(graphql.ObjectConfig{ + Name: strings.Title(strings.ToLower(schemaName)), + Fields: fields, + }) +} + +// createBundlePropertyType creates a GraphQL type for bundle properties with union values +func createBundlePropertyType(propertyTypes map[string]map[string]*FieldInfo) *graphql.Object { + // Create union type for property values + var unionTypes []*graphql.Object + unionTypesMap := make(map[string]*graphql.Object) + + for propType, fields := range propertyTypes { + typeName := fmt.Sprintf("PropertyValue%s", sanitizeTypeName(propType)) + + valueFields := graphql.Fields{} + for fieldName, fieldInfo := range fields { + valueFields[fieldName] = &graphql.Field{ + Type: fieldInfo.GraphQLType, + } + } + + objType := graphql.NewObject(graphql.ObjectConfig{ + Name: typeName, + Fields: valueFields, + }) + + unionTypes = append(unionTypes, objType) + unionTypesMap[propType] = objType + } + + // Create union of all property value types + var valueUnion *graphql.Union + if len(unionTypes) > 0 { + 
valueUnion = graphql.NewUnion(graphql.UnionConfig{ + Name: "PropertyValue", + Types: unionTypes, + ResolveType: func(p graphql.ResolveTypeParams) *graphql.Object { + // Try to determine the type from the parent property's type field + if valueMap, ok := p.Value.(map[string]interface{}); ok { + // Look for type in parent context (property object should have type field) + if parent, ok := p.Context.Value("propertyType").(string); ok { + if objType, ok := unionTypesMap[parent]; ok { + return objType + } + } + // Fallback: use the first matching type + for _, objType := range unionTypesMap { + if len(valueMap) > 0 { + return objType + } + } + } + // Default to first type if available + if len(unionTypes) > 0 { + return unionTypes[0] + } + return nil + }, + }) + } + + // Create the bundle property object type + propertyFields := graphql.Fields{ + "type": &graphql.Field{Type: graphql.String}, + } + + if valueUnion != nil { + propertyFields["value"] = &graphql.Field{Type: valueUnion} + } else { + // Fallback to string if no union types + propertyFields["value"] = &graphql.Field{Type: graphql.String} + } + + return graphql.NewObject(graphql.ObjectConfig{ + Name: "BundleProperty", + Fields: propertyFields, + }) +} + +// sanitizeTypeName converts a property type to a valid GraphQL type name +func sanitizeTypeName(propType string) string { + // Remove dots and other invalid characters, capitalize words + re := regexp.MustCompile(`[^a-zA-Z0-9]`) + clean := re.ReplaceAllString(propType, "_") + parts := strings.Split(clean, "_") + + result := "" + for _, part := range parts { + if part != "" { + result += strings.Title(strings.ToLower(part)) + } + } + + if result == "" { + result = "Unknown" + } + + return result +} + +// BuildDynamicGraphQLSchema creates a complete GraphQL schema from discovered structure +func BuildDynamicGraphQLSchema(catalogSchema *CatalogSchema, metasBySchema map[string][]*declcfg.Meta) (*DynamicSchema, error) { + // Build GraphQL object types for each 
discovered schema + objectTypes := make(map[string]*graphql.Object) + + for schemaName, schemaInfo := range catalogSchema.Schemas { + objectTypes[schemaName] = buildGraphQLObjectType(schemaName, schemaInfo) + } + + // Create root query fields + queryFields := graphql.Fields{} + + for schemaName, objectType := range objectTypes { + fieldName := strings.ToLower(schemaName) + "s" // e.g., "bundles", "packages" + + queryFields[fieldName] = &graphql.Field{ + Type: graphql.NewList(objectType), + Args: graphql.FieldConfigArgument{ + "limit": &graphql.ArgumentConfig{ + Type: graphql.Int, + DefaultValue: 100, + Description: "Maximum number of items to return", + }, + "offset": &graphql.ArgumentConfig{ + Type: graphql.Int, + DefaultValue: 0, + Description: "Number of items to skip", + }, + }, + Resolve: func(p graphql.ResolveParams) (interface{}, error) { + // Get the schema name from the field name + currentSchemaName := "" + for sn := range catalogSchema.Schemas { + if strings.ToLower(sn)+"s" == p.Info.FieldName { + currentSchemaName = sn + break + } + } + + if currentSchemaName == "" { + return nil, fmt.Errorf("unknown schema for field %s", p.Info.FieldName) + } + + // Get metas for this schema + metas, ok := metasBySchema[currentSchemaName] + if !ok { + return []interface{}{}, nil + } + + // Parse arguments + limit, _ := p.Args["limit"].(int) + offset, _ := p.Args["offset"].(int) + + // Convert metas to JSON objects and apply pagination + var results []interface{} + for i, meta := range metas { + if i < offset { + continue + } + if len(results) >= limit { + break + } + + var obj map[string]interface{} + if err := json.Unmarshal(meta.Blob, &obj); err != nil { + continue // Skip malformed objects + } + results = append(results, obj) + } + + return results, nil + }, + } + } + + // Add summary field + queryFields["summary"] = &graphql.Field{ + Type: graphql.NewObject(graphql.ObjectConfig{ + Name: "CatalogSummary", + Fields: graphql.Fields{ + "totalSchemas": 
&graphql.Field{Type: graphql.Int}, + "schemas": &graphql.Field{ + Type: graphql.NewList(graphql.NewObject(graphql.ObjectConfig{ + Name: "SchemaSummary", + Fields: graphql.Fields{ + "name": &graphql.Field{Type: graphql.String}, + "totalObjects": &graphql.Field{Type: graphql.Int}, + "totalFields": &graphql.Field{Type: graphql.Int}, + }, + })), + }, + }, + }), + Resolve: func(p graphql.ResolveParams) (interface{}, error) { + schemas := []interface{}{} + for name, info := range catalogSchema.Schemas { + schemas = append(schemas, map[string]interface{}{ + "name": name, + "totalObjects": info.TotalObjects, + "totalFields": len(info.Fields), + }) + } + + return map[string]interface{}{ + "totalSchemas": len(catalogSchema.Schemas), + "schemas": schemas, + }, nil + }, + } + + // Create root query + rootQuery := graphql.NewObject(graphql.ObjectConfig{ + Name: "Query", + Fields: queryFields, + }) + + // Build the schema + schema, err := graphql.NewSchema(graphql.SchemaConfig{ + Query: rootQuery, + }) + if err != nil { + return nil, fmt.Errorf("failed to create GraphQL schema: %w", err) + } + + return &DynamicSchema{ + Schema: schema, + CatalogSchema: catalogSchema, + MetasBySchema: metasBySchema, + }, nil +} + +// LoadAndSummarizeCatalogDynamic loads FBC using WalkMetasReader and builds dynamic GraphQL schema +func LoadAndSummarizeCatalogDynamic(catalogFS fs.FS) error { + var metas []*declcfg.Meta + + // Collect all metas from the filesystem + err := declcfg.WalkMetasFS(context.Background(), catalogFS, func(path string, meta *declcfg.Meta, err error) error { + if err != nil { + return err + } + if meta != nil { + metas = append(metas, meta) + } + return nil + }) + if err != nil { + return fmt.Errorf("error walking catalog metas: %w", err) + } + + // Discover schema from collected metas + catalogSchema, err := DiscoverSchemaFromMetas(metas) + if err != nil { + return fmt.Errorf("error discovering schema: %w", err) + } + + // Organize metas by schema for resolvers + 
metasBySchema := make(map[string][]*declcfg.Meta) + for _, meta := range metas { + if meta.Schema != "" { + metasBySchema[meta.Schema] = append(metasBySchema[meta.Schema], meta) + } + } + + // Build dynamic GraphQL schema + dynamicSchema, err := BuildDynamicGraphQLSchema(catalogSchema, metasBySchema) + if err != nil { + return fmt.Errorf("error building GraphQL schema: %w", err) + } + + // Set the global schema for serving + schema = dynamicSchema.Schema + + // Print comprehensive summary + fmt.Printf("Dynamic GraphQL schema generation complete.\n") + fmt.Printf("Total schemas discovered: %d\n", len(catalogSchema.Schemas)) + + for schemaName, info := range catalogSchema.Schemas { + fmt.Printf("\nSchema: %s\n", schemaName) + fmt.Printf(" Objects: %d\n", info.TotalObjects) + fmt.Printf(" Fields: %d\n", len(info.Fields)) + + if schemaName == declcfg.SchemaBundle && len(info.PropertyTypes) > 0 { + fmt.Printf(" Property types: %d\n", len(info.PropertyTypes)) + for propType, fields := range info.PropertyTypes { + fmt.Printf(" - %s (%d fields)\n", propType, len(fields)) + } + } + + // Show sample fields + if len(info.Fields) > 0 { + fmt.Printf(" Sample fields: ") + count := 0 + for fieldName := range info.Fields { + if count > 0 { + fmt.Printf(", ") + } + fmt.Printf("%s", fieldName) + count++ + if count >= 5 { // Show first 5 fields + if len(info.Fields) > 5 { + fmt.Printf(", ...") + } + break + } + } + fmt.Printf("\n") + } + } + + fmt.Printf("\nGraphQL endpoints available:\n") + for schemaName := range catalogSchema.Schemas { + fmt.Printf(" - %ss\n", strings.ToLower(schemaName)) + } + fmt.Printf(" - summary\n") + + fmt.Printf("\nSample GraphQL query:\n") + fmt.Printf("{\n") + fmt.Printf(" summary {\n") + fmt.Printf(" totalSchemas\n") + fmt.Printf(" schemas { name totalObjects totalFields }\n") + fmt.Printf(" }\n") + if _, ok := catalogSchema.Schemas[declcfg.SchemaBundle]; ok { + fmt.Printf(" bundles(limit: 5) { name package }\n") + } + if _, ok := 
catalogSchema.Schemas[declcfg.SchemaPackage]; ok { + fmt.Printf(" packages(limit: 5) { name }\n") + } + fmt.Printf("}\n") + + return nil +} + +// ServeGraphQL starts an HTTPS server with the /graphql endpoint. +func ServeGraphQL(listenAddr, certPath, keyPath string) error { + http.HandleFunc("/graphql", func(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + http.Error(w, "Only POST is allowed", http.StatusMethodNotAllowed) + return + } + var params struct { + Query string `json:"query"` + } + if err := json.NewDecoder(r.Body).Decode(¶ms); err != nil { + http.Error(w, "Invalid request body", http.StatusBadRequest) + return + } + result := graphql.Do(graphql.Params{ + Schema: schema, + RequestString: params.Query, + }) + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(result) + }) + + fmt.Printf("Serving GraphQL endpoint at https://%s/graphql\n", listenAddr) + return http.ListenAndServeTLS(listenAddr, certPath, keyPath, nil) +} diff --git a/internal/catalogd/graphql/graphql_test.go b/internal/catalogd/graphql/graphql_test.go new file mode 100644 index 000000000..ff042597f --- /dev/null +++ b/internal/catalogd/graphql/graphql_test.go @@ -0,0 +1,333 @@ +package graphql + +import ( + "testing" + + "github.com/operator-framework/operator-registry/alpha/declcfg" +) + +func TestDiscoverSchemaFromMetas(t *testing.T) { + // Create test metas simulating real catalog data + testMetas := []*declcfg.Meta{ + { + Schema: declcfg.SchemaPackage, + Package: "test-package", + Name: "test-package", + Blob: []byte(`{ + "schema": "olm.package", + "name": "test-package", + "defaultChannel": "stable", + "icon": { + "base64data": "...", + "mediatype": "image/svg+xml" + }, + "description": "A test package" + }`), + }, + { + Schema: declcfg.SchemaChannel, + Package: "test-package", + Name: "stable", + Blob: []byte(`{ + "schema": "olm.channel", + "name": "stable", + "package": "test-package", + "entries": [ + {"name": 
"test-package.v1.0.0"}, + {"name": "test-package.v1.1.0", "replaces": "test-package.v1.0.0"} + ] + }`), + }, + { + Schema: declcfg.SchemaBundle, + Package: "test-package", + Name: "test-package.v1.0.0", + Blob: []byte(`{ + "schema": "olm.bundle", + "name": "test-package.v1.0.0", + "package": "test-package", + "image": "registry.io/test-package@sha256:abc123", + "properties": [ + { + "type": "olm.package", + "value": { + "packageName": "test-package", + "version": "1.0.0" + } + }, + { + "type": "olm.gvk", + "value": { + "group": "example.com", + "version": "v1", + "kind": "TestResource" + } + } + ], + "relatedImages": [ + { + "name": "operator", + "image": "registry.io/test-package@sha256:abc123" + } + ] + }`), + }, + } + + // Test schema discovery + catalogSchema, err := DiscoverSchemaFromMetas(testMetas) + if err != nil { + t.Fatalf("Failed to discover schema: %v", err) + } + + // Validate discovered schemas + if len(catalogSchema.Schemas) != 3 { + t.Errorf("Expected 3 schemas, got %d", len(catalogSchema.Schemas)) + } + + // Test package schema + packageSchema, ok := catalogSchema.Schemas[declcfg.SchemaPackage] + if !ok { + t.Error("Package schema not discovered") + } else { + if packageSchema.TotalObjects != 1 { + t.Errorf("Expected 1 package object, got %d", packageSchema.TotalObjects) + } + if len(packageSchema.Fields) == 0 { + t.Error("No fields discovered for package schema") + } + + // Check for expected fields + expectedFields := []string{"name", "defaultChannel", "icon", "description", "schema"} + for _, field := range expectedFields { + graphqlField := remapFieldName(field) + if _, exists := packageSchema.Fields[graphqlField]; !exists { + t.Errorf("Expected field %s (mapped to %s) not found in package schema", field, graphqlField) + } + } + } + + // Test bundle schema with properties + bundleSchema, ok := catalogSchema.Schemas[declcfg.SchemaBundle] + if !ok { + t.Error("Bundle schema not discovered") + } else { + if bundleSchema.TotalObjects != 1 { + 
t.Errorf("Expected 1 bundle object, got %d", bundleSchema.TotalObjects) + } + + // Check property types discovery + if len(bundleSchema.PropertyTypes) == 0 { + t.Error("No property types discovered for bundle schema") + } + + // Check for specific property types + if olmPackage, exists := bundleSchema.PropertyTypes["olm.package"]; !exists { + t.Error("olm.package property type not discovered") + } else { + expectedPropertyFields := []string{"packageName", "version"} + for _, field := range expectedPropertyFields { + graphqlField := remapFieldName(field) + if _, exists := olmPackage[graphqlField]; !exists { + t.Errorf("Expected property field %s not found in olm.package", graphqlField) + } + } + } + + if olmGvk, exists := bundleSchema.PropertyTypes["olm.gvk"]; !exists { + t.Error("olm.gvk property type not discovered") + } else { + expectedGvkFields := []string{"group", "version", "kind"} + for _, field := range expectedGvkFields { + graphqlField := remapFieldName(field) + if _, exists := olmGvk[graphqlField]; !exists { + t.Errorf("Expected GVK field %s not found in olm.gvk", graphqlField) + } + } + } + } + + // Test channel schema + channelSchema, ok := catalogSchema.Schemas[declcfg.SchemaChannel] + if !ok { + t.Error("Channel schema not discovered") + } else { + if channelSchema.TotalObjects != 1 { + t.Errorf("Expected 1 channel object, got %d", channelSchema.TotalObjects) + } + } +} + +func TestFieldNameRemapping(t *testing.T) { + testCases := []struct { + input string + expected string + }{ + {"name", "name"}, + {"package-name", "packageName"}, + {"default_channel", "defaultChannel"}, + {"related-images", "relatedImages"}, + {"", "value"}, + {"123invalid", "field_123invalid"}, + {"my.field.name", "myFieldName"}, + {"CamelCase", "camelCase"}, + {"UPPERCASE", "uppercase"}, + {"mixed_case-field.name", "mixedCaseFieldName"}, + } + + for _, tc := range testCases { + result := remapFieldName(tc.input) + if result != tc.expected { + t.Errorf("remapFieldName(%q) = %q, 
expected %q", tc.input, result, tc.expected) + } + } +} + +func TestSanitizeTypeName(t *testing.T) { + testCases := []struct { + input string + expected string + }{ + {"olm.package", "OlmPackage"}, + {"olm.gvk", "OlmGvk"}, + {"some-type", "SomeType"}, + {"complex.type-name_here", "ComplexTypeNameHere"}, + {"", "Unknown"}, + {"123invalid", "Invalid"}, + } + + for _, tc := range testCases { + result := sanitizeTypeName(tc.input) + if result != tc.expected { + t.Errorf("sanitizeTypeName(%q) = %q, expected %q", tc.input, result, tc.expected) + } + } +} + +func TestAnalyzeJSONObject(t *testing.T) { + testObj := map[string]interface{}{ + "name": "test-package", + "version": "1.0.0", + "count": 42, + "active": true, + "tags": []interface{}{"tag1", "tag2"}, + "numbers": []interface{}{1, 2, 3}, + "nested": map[string]interface{}{"key": "value"}, + "nullField": nil, + "emptyArray": []interface{}{}, + } + + info := &SchemaInfo{ + Fields: make(map[string]*FieldInfo), + } + + analyzeJSONObject(testObj, info) + + // Check that all fields were discovered + expectedFields := map[string]string{ + "name": "string", + "version": "string", + "count": "int", + "active": "bool", + "tags": "[]string", + "numbers": "[]int", + "nested": "string", // Complex objects become strings + "nullField": "string", // Null becomes string + "emptyArray": "[]string", + } + + for origField, expectedType := range expectedFields { + graphqlField := remapFieldName(origField) + fieldInfo, exists := info.Fields[graphqlField] + if !exists { + t.Errorf("Field %s (mapped to %s) not discovered", origField, graphqlField) + continue + } + + // Type checking would require GraphQL types, so we just check that it was analyzed + if len(fieldInfo.SampleValues) == 0 { + t.Errorf("No sample values recorded for field %s", graphqlField) + } + + _ = expectedType // We can't easily test GraphQL types without the library + } +} + +// TestBundlePropertiesAnalysis tests the analysis of complex bundle properties +func 
TestBundlePropertiesAnalysis(t *testing.T) { + bundleObj := map[string]interface{}{ + "name": "test-bundle", + "package": "test-package", + "properties": []interface{}{ + map[string]interface{}{ + "type": "olm.package", + "value": map[string]interface{}{ + "packageName": "test-package", + "version": "1.0.0", + }, + }, + map[string]interface{}{ + "type": "olm.gvk", + "value": map[string]interface{}{ + "group": "example.com", + "version": "v1", + "kind": "TestResource", + }, + }, + map[string]interface{}{ + "type": "olm.csv.metadata", + "value": map[string]interface{}{ + "name": "test-operator", + "namespace": "test-namespace", + "annotations": map[string]interface{}{ + "description": "A test operator", + }, + }, + }, + }, + } + + info := &SchemaInfo{ + PropertyTypes: make(map[string]map[string]*FieldInfo), + } + + analyzeBundleProperties(bundleObj, info) + + // Check that property types were discovered + expectedPropertyTypes := []string{"olm.package", "olm.gvk", "olm.csv.metadata"} + for _, propType := range expectedPropertyTypes { + if _, exists := info.PropertyTypes[propType]; !exists { + t.Errorf("Property type %s not discovered", propType) + } + } + + // Check olm.package fields + if olmPackage, exists := info.PropertyTypes["olm.package"]; exists { + expectedFields := []string{"packageName", "version"} + for _, field := range expectedFields { + if _, exists := olmPackage[field]; !exists { + t.Errorf("Field %s not found in olm.package property type", field) + } + } + } + + // Check olm.gvk fields + if olmGvk, exists := info.PropertyTypes["olm.gvk"]; exists { + expectedFields := []string{"group", "version", "kind"} + for _, field := range expectedFields { + if _, exists := olmGvk[field]; !exists { + t.Errorf("Field %s not found in olm.gvk property type", field) + } + } + } + + // Check that nested objects are handled (annotations in csv.metadata) + if csvMetadata, exists := info.PropertyTypes["olm.csv.metadata"]; exists { + expectedFields := []string{"name", 
"namespace", "annotations"} + for _, field := range expectedFields { + if _, exists := csvMetadata[field]; !exists { + t.Errorf("Field %s not found in olm.csv.metadata property type", field) + } + } + } +} diff --git a/internal/catalogd/graphql/sample-queries.txt b/internal/catalogd/graphql/sample-queries.txt new file mode 100644 index 000000000..93594ae08 --- /dev/null +++ b/internal/catalogd/graphql/sample-queries.txt @@ -0,0 +1,412 @@ +GraphQL Query Examples for Catalog Data +========================================== + +This file contains common GraphQL queries for examining catalog objects +served via the /graphql endpoint. The dynamic schema adapts to your catalog +structure, so field availability may vary based on your specific data. + +Basic Summary Queries +-------------------- + +# Get overall catalog summary +{ + summary { + totalSchemas + schemas { + name + totalObjects + totalFields + } + } +} + +# Get schema information with more details +{ + summary { + totalSchemas + schemas { + name + totalObjects + totalFields + } + } +} + +Basic Object Queries +------------------- + +# Get first 10 packages +{ + packages(limit: 10) { + name + defaultChannel + icon + description + } +} + +# Get packages with pagination +{ + packages(limit: 5, offset: 10) { + name + defaultChannel + description + } +} + +# Get first 10 bundles with basic info +{ + bundles(limit: 10) { + name + package + version + image + skipRange + } +} + +# Get channels +{ + channels(limit: 10) { + name + package + entries + } +} + +Bundle Property Queries +---------------------- + +# Get bundles with all properties +{ + bundles(limit: 5) { + name + package + version + properties { + type + value { + ... on PropertyValueFeaturesOperatorsOpenshiftIo { + disconnected + cnf + cni + csi + fips + proxy + tlsProfiles + tokenAuthentication + } + ... on PropertyValueOlmGvk { + group + version + kind + } + ... on PropertyValueOlmPackage { + packageName + version + } + ... 
on PropertyValueOlmSkips { + value + } + ... on PropertyValueOlmSkipRange { + value + } + } + } + } +} + +# Specific query for OpenShift features properties +{ + bundles(limit: 20) { + name + package + version + properties { + type + value { + ... on PropertyValueFeaturesOperatorsOpenshiftIo { + disconnected + cnf + cni + csi + fips + proxy + tlsProfiles + tokenAuthentication + } + } + } + } +} + +# Query bundles with GVK properties +{ + bundles(limit: 10) { + name + package + properties { + type + value { + ... on PropertyValueOlmGvk { + group + version + kind + } + } + } + } +} + +# Query bundles with package requirements +{ + bundles(limit: 10) { + name + package + version + properties { + type + value { + ... on PropertyValueOlmPackageRequired { + packageName + versionRange + } + ... on PropertyValueOlmPackage { + packageName + version + } + } + } + } +} + +Complex Nested Queries +---------------------- + +# Comprehensive bundle analysis +{ + bundles(limit: 5) { + name + package + version + image + skipRange + replaces + properties { + type + value { + ... on PropertyValueFeaturesOperatorsOpenshiftIo { + disconnected + cnf + cni + csi + fips + proxy + tlsProfiles + tokenAuthentication + } + ... on PropertyValueOlmGvk { + group + version + kind + } + ... on PropertyValueOlmPackage { + packageName + version + } + ... on PropertyValueOlmSkips { + value + } + ... on PropertyValueOlmSkipRange { + value + } + ... on PropertyValueOlmBundle { + name + version + } + } + } + } +} + +# Search for specific OpenShift capabilities +{ + bundles(limit: 50) { + name + package + version + properties { + type + value { + ... 
on PropertyValueFeaturesOperatorsOpenshiftIo { + disconnected + fips + proxy + cnf + } + } + } + } +} + +Filtering and Analysis Queries +----------------------------- + +# Get packages and their default channels +{ + packages(limit: 20) { + name + defaultChannel + description + } + channels(limit: 30) { + name + package + entries + } +} + +# Get bundles with specific fields +{ + bundles(limit: 15) { + name + package + version + image + csvDescription + skipRange + replaces + relatedImages + } +} + +# Large dataset exploration +{ + bundles(limit: 100, offset: 0) { + name + package + version + } +} + +Schema Discovery Queries +----------------------- + +# Explore available schemas and their object counts +{ + summary { + totalSchemas + schemas { + name + totalObjects + totalFields + } + } +} + +# Minimal query to check endpoint availability +{ + summary { + totalSchemas + } +} + +Property Type Analysis +--------------------- + +# Focus on OpenShift features across all bundles +{ + bundles(limit: 100) { + name + package + properties { + type + value { + ... on PropertyValueFeaturesOperatorsOpenshiftIo { + disconnected + cnf + cni + csi + fips + proxy + tlsProfiles + tokenAuthentication + } + } + } + } +} + +# Check for specific property types +{ + bundles(limit: 50) { + name + package + properties { + type + # The value will be resolved based on the type + } + } +} + +Performance Queries +------------------ + +# Small result set for quick testing +{ + packages(limit: 3) { + name + } + bundles(limit: 3) { + name + package + } +} + +# Larger result set for comprehensive analysis +{ + bundles(limit: 200) { + name + package + version + properties { + type + value { + ... on PropertyValueFeaturesOperatorsOpenshiftIo { + disconnected + fips + } + ... on PropertyValueOlmGvk { + group + kind + } + } + } + } +} + +Notes on Usage +-------------- + +1. Property union types are dynamically generated based on your catalog data. 
+ The examples above assume common property types like: + - features.operators.openshift.io + - olm.gvk + - olm.package + - olm.bundle + - olm.skips + - olm.skipRange + +2. Field names are automatically converted to camelCase for GraphQL compatibility. + Original JSON field names like "csv-description" become "csvDescription". + +3. Use the summary query first to understand what schemas and fields are + available in your specific catalog. + +4. Pagination is available on all list endpoints using limit and offset parameters. + +5. The property value union types allow type-safe access to different property + structures while maintaining flexibility for unknown property types. + +6. For large catalogs, start with small limit values and increase as needed + to avoid overwhelming responses. \ No newline at end of file From 9e65b3e159f97df72215b7ae061339faa633da47 Mon Sep 17 00:00:00 2001 From: grokspawn Date: Tue, 15 Jul 2025 11:08:24 -0500 Subject: [PATCH 2/3] integration with existing http service endpoint Signed-off-by: grokspawn --- internal/catalogd/graphql/README.md | 106 +++++++++++++++++++++++++ internal/catalogd/graphql/graphql.go | 47 +++-------- internal/catalogd/storage/localdir.go | 109 ++++++++++++++++++++++++++ 3 files changed, 225 insertions(+), 37 deletions(-) create mode 100644 internal/catalogd/graphql/README.md diff --git a/internal/catalogd/graphql/README.md b/internal/catalogd/graphql/README.md new file mode 100644 index 000000000..23f3d3068 --- /dev/null +++ b/internal/catalogd/graphql/README.md @@ -0,0 +1,106 @@ +# GraphQL Integration + +This package provides dynamic GraphQL schema generation for operator catalog data, integrated into the catalogd storage server. + +## Usage + +The GraphQL endpoint is now available as part of the catalogd storage server at: + +``` +{catalog}/api/v1/graphql +``` + +Where `{catalog}` is replaced by the actual catalog name at runtime. 
+ +## Example Usage + +### Making a GraphQL Request + +```bash +curl -X POST http://localhost:8080/my-catalog/api/v1/graphql \ + -H "Content-Type: application/json" \ + -d '{ + "query": "{ summary { totalSchemas schemas { name totalObjects totalFields } } }" + }' +``` + +### Sample Queries + +#### Get catalog summary: +```graphql +{ + summary { + totalSchemas + schemas { + name + totalObjects + totalFields + } + } +} +``` + +#### Get bundles with pagination: +```graphql +{ + bundles(limit: 5, offset: 0) { + name + package + version + } +} +``` + +#### Get packages: +```graphql +{ + packages(limit: 10) { + name + description + } +} +``` + +#### Get bundle properties (union types): +```graphql +{ + bundles(limit: 5) { + name + properties { + type + value { + ... on PropertyValueFeaturesOperatorsOpenshiftIo { + disconnected + cnf + cni + csi + fips + } + } + } + } +} +``` + +## Features + +- **Dynamic Schema Generation**: Automatically discovers schema structure from catalog metadata +- **Union Types**: Supports complex bundle properties with variable structures +- **Pagination**: Built-in limit/offset pagination for all queries +- **Field Name Sanitization**: Converts JSON field names to valid GraphQL identifiers +- **Catalog-Specific**: Each catalog gets its own dynamically generated schema + +## Integration + +The GraphQL functionality is integrated into the `LocalDirV1` storage handler in `internal/catalogd/storage/localdir.go`: + +- `handleV1GraphQL()`: Handles POST requests to the GraphQL endpoint +- `createCatalogFS()`: Creates filesystem interface for catalog data +- `buildCatalogGraphQLSchema()`: Builds dynamic GraphQL schema for specific catalogs + +## Technical Details + +- Uses `declcfg.WalkMetasFS` to discover schema structure +- Generates GraphQL object types dynamically from discovered fields +- Creates union types for bundle properties with variable structures +- Supports all standard GraphQL features including introspection \ No newline at end of file 
diff --git a/internal/catalogd/graphql/graphql.go b/internal/catalogd/graphql/graphql.go index 9034fdd15..79036a881 100644 --- a/internal/catalogd/graphql/graphql.go +++ b/internal/catalogd/graphql/graphql.go @@ -5,7 +5,6 @@ import ( "encoding/json" "fmt" "io/fs" - "net/http" "reflect" "regexp" "strings" @@ -14,8 +13,6 @@ import ( "github.com/operator-framework/operator-registry/alpha/declcfg" ) -var schema graphql.Schema - // FieldInfo represents discovered field information type FieldInfo struct { Name string @@ -565,7 +562,7 @@ func BuildDynamicGraphQLSchema(catalogSchema *CatalogSchema, metasBySchema map[s } // LoadAndSummarizeCatalogDynamic loads FBC using WalkMetasReader and builds dynamic GraphQL schema -func LoadAndSummarizeCatalogDynamic(catalogFS fs.FS) error { +func LoadAndSummarizeCatalogDynamic(catalogFS fs.FS) (*DynamicSchema, error) { var metas []*declcfg.Meta // Collect all metas from the filesystem @@ -579,13 +576,13 @@ func LoadAndSummarizeCatalogDynamic(catalogFS fs.FS) error { return nil }) if err != nil { - return fmt.Errorf("error walking catalog metas: %w", err) + return nil, fmt.Errorf("error walking catalog metas: %w", err) } // Discover schema from collected metas catalogSchema, err := DiscoverSchemaFromMetas(metas) if err != nil { - return fmt.Errorf("error discovering schema: %w", err) + return nil, fmt.Errorf("error discovering schema: %w", err) } // Organize metas by schema for resolvers @@ -599,11 +596,15 @@ func LoadAndSummarizeCatalogDynamic(catalogFS fs.FS) error { // Build dynamic GraphQL schema dynamicSchema, err := BuildDynamicGraphQLSchema(catalogSchema, metasBySchema) if err != nil { - return fmt.Errorf("error building GraphQL schema: %w", err) + return nil, fmt.Errorf("error building GraphQL schema: %w", err) } - // Set the global schema for serving - schema = dynamicSchema.Schema + return dynamicSchema, nil +} + +// PrintCatalogSummary prints a comprehensive summary of the discovered schema +func 
PrintCatalogSummary(dynamicSchema *DynamicSchema) { + catalogSchema := dynamicSchema.CatalogSchema // Print comprehensive summary fmt.Printf("Dynamic GraphQL schema generation complete.\n") @@ -661,32 +662,4 @@ func LoadAndSummarizeCatalogDynamic(catalogFS fs.FS) error { fmt.Printf(" packages(limit: 5) { name }\n") } fmt.Printf("}\n") - - return nil -} - -// ServeGraphQL starts an HTTPS server with the /graphql endpoint. -func ServeGraphQL(listenAddr, certPath, keyPath string) error { - http.HandleFunc("/graphql", func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodPost { - http.Error(w, "Only POST is allowed", http.StatusMethodNotAllowed) - return - } - var params struct { - Query string `json:"query"` - } - if err := json.NewDecoder(r.Body).Decode(¶ms); err != nil { - http.Error(w, "Invalid request body", http.StatusBadRequest) - return - } - result := graphql.Do(graphql.Params{ - Schema: schema, - RequestString: params.Query, - }) - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(result) - }) - - fmt.Printf("Serving GraphQL endpoint at https://%s/graphql\n", listenAddr) - return http.ListenAndServeTLS(listenAddr, certPath, keyPath, nil) } diff --git a/internal/catalogd/storage/localdir.go b/internal/catalogd/storage/localdir.go index 44ef65c58..ea54a89fc 100644 --- a/internal/catalogd/storage/localdir.go +++ b/internal/catalogd/storage/localdir.go @@ -17,6 +17,8 @@ import ( "golang.org/x/sync/singleflight" "k8s.io/apimachinery/pkg/util/sets" + "github.com/graphql-go/graphql" + gql "github.com/operator-framework/operator-controller/internal/catalogd/graphql" "github.com/operator-framework/operator-registry/alpha/declcfg" ) @@ -192,9 +194,16 @@ func (s *LocalDirV1) StorageServerHandler() http.Handler { if s.EnableMetasHandler { mux.HandleFunc(s.RootURL.JoinPath("{catalog}", "api", "v1", "metas").Path, s.handleV1Metas) } + mux.HandleFunc(s.RootURL.JoinPath("{catalog}", "api", "v1", "graphql").Path, 
s.handleV1GraphQL) + allowedMethodsHandler := func(next http.Handler, allowedMethods ...string) http.Handler { allowedMethodSet := sets.New[string](allowedMethods...) return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Allow POST requests for GraphQL endpoint + if r.URL.Path != "" && r.URL.Path[len(r.URL.Path)-7:] == "graphql" && r.Method == http.MethodPost { + next.ServeHTTP(w, r) + return + } if !allowedMethodSet.Has(r.Method) { http.Error(w, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed) return @@ -269,6 +278,59 @@ func (s *LocalDirV1) handleV1Metas(w http.ResponseWriter, r *http.Request) { serveJSONLines(w, r, indexReader) } +func (s *LocalDirV1) handleV1GraphQL(w http.ResponseWriter, r *http.Request) { + s.m.RLock() + defer s.m.RUnlock() + + if r.Method != http.MethodPost { + http.Error(w, "Only POST is allowed", http.StatusMethodNotAllowed) + return + } + + catalog := r.PathValue("catalog") + catalogFile, _, err := s.catalogData(catalog) + if err != nil { + httpError(w, err) + return + } + defer catalogFile.Close() + + // Parse GraphQL query from request body + var params struct { + Query string `json:"query"` + } + if err := json.NewDecoder(r.Body).Decode(¶ms); err != nil { + http.Error(w, "Invalid request body", http.StatusBadRequest) + return + } + + // Create catalog filesystem from the stored data + catalogFS, err := s.createCatalogFS(catalog) + if err != nil { + httpError(w, err) + return + } + + // Build dynamic GraphQL schema for this catalog + dynamicSchema, err := s.buildCatalogGraphQLSchema(catalogFS) + if err != nil { + httpError(w, err) + return + } + + // Execute GraphQL query + result := graphql.Do(graphql.Params{ + Schema: dynamicSchema.Schema, + RequestString: params.Query, + }) + + w.Header().Set("Content-Type", "application/json") + if err := json.NewEncoder(w).Encode(result); err != nil { + httpError(w, err) + return + } +} + func (s *LocalDirV1) catalogData(catalog string) (*os.File, 
os.FileInfo, error) { catalogFile, err := os.Open(catalogFilePath(s.catalogDir(catalog))) if err != nil { @@ -328,3 +390,50 @@ func (s *LocalDirV1) getIndex(catalog string) (*index, error) { } return idx.(*index), nil } + +// createCatalogFS creates a filesystem interface for the catalog data +func (s *LocalDirV1) createCatalogFS(catalog string) (fs.FS, error) { + catalogDir := s.catalogDir(catalog) + return os.DirFS(catalogDir), nil +} + +// buildCatalogGraphQLSchema builds a dynamic GraphQL schema for the given catalog +func (s *LocalDirV1) buildCatalogGraphQLSchema(catalogFS fs.FS) (*gql.DynamicSchema, error) { + var metas []*declcfg.Meta + + // Collect all metas from the catalog filesystem + err := declcfg.WalkMetasFS(context.Background(), catalogFS, func(path string, meta *declcfg.Meta, err error) error { + if err != nil { + return err + } + if meta != nil { + metas = append(metas, meta) + } + return nil + }) + if err != nil { + return nil, fmt.Errorf("error walking catalog metas: %w", err) + } + + // Discover schema from collected metas + catalogSchema, err := gql.DiscoverSchemaFromMetas(metas) + if err != nil { + return nil, fmt.Errorf("error discovering schema: %w", err) + } + + // Organize metas by schema for resolvers + metasBySchema := make(map[string][]*declcfg.Meta) + for _, meta := range metas { + if meta.Schema != "" { + metasBySchema[meta.Schema] = append(metasBySchema[meta.Schema], meta) + } + } + + // Build dynamic GraphQL schema + dynamicSchema, err := gql.BuildDynamicGraphQLSchema(catalogSchema, metasBySchema) + if err != nil { + return nil, fmt.Errorf("error building GraphQL schema: %w", err) + } + + return dynamicSchema, nil +} From c7edbd20c80c56e271723a738edbb50cba064055 Mon Sep 17 00:00:00 2001 From: grokspawn Date: Thu, 17 Jul 2025 21:22:12 -0500 Subject: [PATCH 3/3] allow POST method only for graphql handler --- internal/catalogd/storage/localdir.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git 
a/internal/catalogd/storage/localdir.go b/internal/catalogd/storage/localdir.go
index ea54a89fc..9650e1efb 100644
--- a/internal/catalogd/storage/localdir.go
+++ b/internal/catalogd/storage/localdir.go
@@ -199,9 +199,9 @@ func (s *LocalDirV1) StorageServerHandler() http.Handler {
 	allowedMethodsHandler := func(next http.Handler, allowedMethods ...string) http.Handler {
 		allowedMethodSet := sets.New[string](allowedMethods...)
 		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-			// Allow POST requests for GraphQL endpoint
-			if r.URL.Path != "" && r.URL.Path[len(r.URL.Path)-7:] == "graphql" && r.Method == http.MethodPost {
-				next.ServeHTTP(w, r)
+			// Allow POST requests only for the GraphQL endpoint; check the method first and guard the slice so short paths (e.g. "/") cannot panic
+			if r.Method == http.MethodPost && (len(r.URL.Path) < 7 || r.URL.Path[len(r.URL.Path)-7:] != "graphql") {
+				http.Error(w, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)
 				return
 			}
 			if !allowedMethodSet.Has(r.Method) {
@@ -211,7 +211,7 @@ func (s *LocalDirV1) StorageServerHandler() http.Handler {
 			next.ServeHTTP(w, r)
 		})
 	}
-	return allowedMethodsHandler(mux, http.MethodGet, http.MethodHead)
+	return allowedMethodsHandler(mux, http.MethodGet, http.MethodHead, http.MethodPost)
 }
 
 func (s *LocalDirV1) handleV1All(w http.ResponseWriter, r *http.Request) {