From a64a5f142dc652a4c1b9bb12af62f93345309673 Mon Sep 17 00:00:00 2001 From: Jaydip Gabani Date: Sat, 1 Mar 2025 03:19:25 +0000 Subject: [PATCH 01/33] adding driver to export to disk Signed-off-by: Jaydip Gabani --- .github/workflows/disk-export.yaml | 64 ++ Makefile | 125 +-- pkg/audit/manager.go | 54 +- pkg/export/disk/diskwriter.go | 271 ++++++ pkg/export/disk/diskwriter_test.go | 812 ++++++++++++++++++ pkg/export/system.go | 2 + pkg/export/util/util.go | 44 + test/export/fake-reader/Dockerfile | 35 + test/export/fake-reader/export_config.yaml | 12 + test/export/fake-reader/main.go | 122 +++ .../manifest}/publish-components.yaml | 0 11 files changed, 1455 insertions(+), 86 deletions(-) create mode 100644 .github/workflows/disk-export.yaml create mode 100644 pkg/export/disk/diskwriter.go create mode 100644 pkg/export/disk/diskwriter_test.go create mode 100644 pkg/export/util/util.go create mode 100644 test/export/fake-reader/Dockerfile create mode 100644 test/export/fake-reader/export_config.yaml create mode 100644 test/export/fake-reader/main.go rename test/export/{ => fake-subscriber/manifest}/publish-components.yaml (100%) diff --git a/.github/workflows/disk-export.yaml b/.github/workflows/disk-export.yaml new file mode 100644 index 00000000000..6e692a4401c --- /dev/null +++ b/.github/workflows/disk-export.yaml @@ -0,0 +1,64 @@ +name: dapr-export +on: + push: + paths: + - "pkg/export/dapr" + - "test/export/**" + pull_request: + paths: + - "pkg/export/dapr" + - "test/export/**" +permissions: read-all + +jobs: + dapr_test: + name: "Disk export test" + runs-on: ubuntu-22.04 + timeout-minutes: 15 + steps: + - name: Harden Runner + uses: step-security/harden-runner@cb605e52c26070c328afc4562f0b4ada7618a84e # v2.10.4 + with: + egress-policy: audit + + - name: Check out code into the Go module directory + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 + + - name: Bootstrap e2e + run: | + mkdir -p $GITHUB_WORKSPACE/bin + echo "$GITHUB_WORKSPACE/bin" >> 
$GITHUB_PATH + make e2e-bootstrap + make e2e-reader-build-load-image + + - name: Run e2e + run: | + make docker-buildx IMG=gatekeeper-e2e:latest + make e2e-build-load-externaldata-image + make docker-buildx-crds CRD_IMG=gatekeeper-crds:latest + kind load docker-image --name kind gatekeeper-e2e:latest gatekeeper-crds:latest + make deploy \ + IMG=gatekeeper-e2e:latest \ + USE_LOCAL_IMG=true \ + GENERATE_VAP=true \ + GENERATE_VAPBINDING=true \ + EXPORT_BACKEND=disk + + kubectl apply -f test/export/fake-reader/export_config.yaml + make e2e-publisher-deploy + make test-e2e ENABLE_EXPORT_TESTS=1 + + - name: Save logs + if: ${{ always() }} + run: | + kubectl logs -n gatekeeper-system -l control-plane=audit-controller -c manager --tail=-1 > logs-audit-manager.json + kubectl logs -n gatekeeper-system -l control-plane=audit-controller -c go-sub --tail=-1 > logs-audit-export.json + + - name: Upload artifacts + uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0 + if: ${{ always() }} + with: + name: export-logs + path: | + logs-*.json + diff --git a/Makefile b/Makefile index 7002bfd19dc..1304a992e4e 100644 --- a/Makefile +++ b/Makefile @@ -37,6 +37,32 @@ NODE_VERSION ?= 16-bullseye-slim YQ_VERSION ?= 4.30.6 HELM_ARGS ?= +HELM_DAPR_ARGS := --set-string auditPodAnnotations.dapr\\.io/enabled=true \ + --set-string auditPodAnnotations.dapr\\.io/app-id=audit \ + --set-string auditPodAnnotations.dapr\\.io/metrics-port=9999 \ + +HELM_EXPORT_ARGS := --set enableViolationExport=${ENABLE_EXPORT} \ + --set audit.connection=${AUDIT_CONNECTION} \ + --set audit.channel=${AUDIT_CHANNEL} \ + +HELM_EXTRA_ARGS := --set image.repository=${HELM_REPO} \ + --set image.crdRepository=${HELM_CRD_REPO} \ + --set image.release=${HELM_RELEASE} \ + --set postInstall.labelNamespace.image.repository=${HELM_CRD_REPO} \ + --set postInstall.labelNamespace.image.tag=${HELM_RELEASE} \ + --set postInstall.labelNamespace.enabled=true \ + --set postInstall.probeWebhook.enabled=true \ + 
--set emitAdmissionEvents=true \ + --set emitAuditEvents=true \ + --set admissionEventsInvolvedNamespace=true \ + --set auditEventsInvolvedNamespace=true \ + --set disabledBuiltins={http.send} \ + --set logMutations=true \ + --set logLevel=${LOG_LEVEL} \ + --set defaultCreateVAPForTemplates=${GENERATE_VAP} \ + --set defaultCreateVAPBindingForConstraints=${GENERATE_VAPBINDING} \ + --set mutationAnnotations=true;\ + GATEKEEPER_NAMESPACE ?= gatekeeper-system # When updating this, make sure to update the corresponding action in @@ -48,6 +74,7 @@ GOLANGCI_LINT_CACHE := $(shell pwd)/.tmp/golangci-lint BENCHMARK_FILE_NAME ?= benchmarks.txt FAKE_SUBSCRIBER_IMAGE ?= fake-subscriber:latest +FAKE_READER_IMAGE ?= fake-reader:latest ROOT_DIR := $(shell dirname $(realpath $(firstword $(MAKEFILE_LIST)))) BIN_DIR := $(abspath $(ROOT_DIR)/bin) @@ -106,6 +133,35 @@ MANAGER_IMAGE_PATCH := "apiVersion: apps/v1\ \n - --log-level=${LOG_LEVEL}\ \n" +MANAGER_SIDECAR_IMAGE_PATCH := "\n - --enable-violation-export=true\ +\n - --constraint-violations-limit=0\ +\n - --audit-connection=audit\ +\n - --audit-channel=audit\ +\n volumeMounts:\ +\n - mountPath: /tmp/violations\ +\n name: tmp-violations\ +\n - name: go-sub\ +\n image: fake-reader:latest\ +\n imagePullPolicy: Never\ +\n securityContext:\ +\n allowPrivilegeEscalation: false\ +\n capabilities:\ +\n drop:\ +\n - ALL\ +\n readOnlyRootFilesystem: true\ +\n runAsGroup: 999\ +\n runAsNonRoot: true\ +\n runAsUser: 1000\ +\n seccompProfile:\ +\n type: RuntimeDefault\ +\n volumeMounts:\ +\n - mountPath: /tmp/violations\ +\n name: tmp-violations\ +\n volumes:\ +\n - emptyDir: {}\ +\n name: tmp-violations\ +\n" + # Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) ifeq (,$(shell go env GOBIN)) GOBIN=$(shell go env GOPATH)/bin @@ -207,48 +263,14 @@ ifeq ($(ENABLE_EXPORT),true) ./.staging/helm/linux-amd64/helm install manifest_staging/charts/gatekeeper --name-template=gatekeeper \ --namespace ${GATEKEEPER_NAMESPACE} \ 
--debug --wait \ - --set image.repository=${HELM_REPO} \ - --set image.crdRepository=${HELM_CRD_REPO} \ - --set image.release=${HELM_RELEASE} \ - --set postInstall.labelNamespace.image.repository=${HELM_CRD_REPO} \ - --set postInstall.labelNamespace.image.tag=${HELM_RELEASE} \ - --set postInstall.labelNamespace.enabled=true \ - --set postInstall.probeWebhook.enabled=true \ - --set emitAdmissionEvents=true \ - --set emitAuditEvents=true \ - --set admissionEventsInvolvedNamespace=true \ - --set auditEventsInvolvedNamespace=true \ - --set disabledBuiltins={http.send} \ - --set logMutations=true \ - --set enableViolationExport=${ENABLE_EXPORT} \ - --set audit.connection=${AUDIT_CONNECTION} \ - --set audit.channel=${AUDIT_CHANNEL} \ - --set-string auditPodAnnotations.dapr\\.io/enabled=true \ - --set-string auditPodAnnotations.dapr\\.io/app-id=audit \ - --set-string auditPodAnnotations.dapr\\.io/metrics-port=9999 \ - --set logLevel=${LOG_LEVEL} \ - --set mutationAnnotations=true; + $(HELM_EXPORT_ARGS) \ + ${HELM_DAPR_ARGS} \ + $(HELM_EXTRA_ARGS) else ./.staging/helm/linux-amd64/helm install manifest_staging/charts/gatekeeper --name-template=gatekeeper \ --namespace ${GATEKEEPER_NAMESPACE} --create-namespace \ --debug --wait \ - --set image.repository=${HELM_REPO} \ - --set image.crdRepository=${HELM_CRD_REPO} \ - --set image.release=${HELM_RELEASE} \ - --set postInstall.labelNamespace.image.repository=${HELM_CRD_REPO} \ - --set postInstall.labelNamespace.image.tag=${HELM_RELEASE} \ - --set postInstall.labelNamespace.enabled=true \ - --set postInstall.probeWebhook.enabled=true \ - --set emitAdmissionEvents=true \ - --set emitAuditEvents=true \ - --set admissionEventsInvolvedNamespace=true \ - --set auditEventsInvolvedNamespace=true \ - --set disabledBuiltins={http.send} \ - --set logMutations=true \ - --set logLevel=${LOG_LEVEL} \ - --set defaultCreateVAPForTemplates=${GENERATE_VAP} \ - --set defaultCreateVAPBindingForConstraints=${GENERATE_VAPBINDING} \ - --set 
mutationAnnotations=true; + $(HELM_EXTRA_ARGS) endif e2e-helm-upgrade-init: e2e-helm-install @@ -273,23 +295,7 @@ e2e-helm-upgrade: ./.staging/helm/linux-amd64/helm upgrade gatekeeper manifest_staging/charts/gatekeeper \ --namespace ${GATEKEEPER_NAMESPACE} \ --debug --wait \ - --set image.repository=${HELM_REPO} \ - --set image.crdRepository=${HELM_CRD_REPO} \ - --set image.release=${HELM_RELEASE} \ - --set postInstall.labelNamespace.image.repository=${HELM_CRD_REPO} \ - --set postInstall.labelNamespace.image.tag=${HELM_RELEASE} \ - --set postInstall.labelNamespace.enabled=true \ - --set postInstall.probeWebhook.enabled=true \ - --set emitAdmissionEvents=true \ - --set emitAuditEvents=true \ - --set admissionEventsInvolvedNamespace=true \ - --set auditEventsInvolvedNamespace=true \ - --set disabledBuiltins={http.send} \ - --set logMutations=true \ - --set logLevel=${LOG_LEVEL} \ - --set defaultCreateVAPForTemplates=${GENERATE_VAP} \ - --set defaultCreateVAPBindingForConstraints=${GENERATE_VAPBINDING} \ - --set mutationAnnotations=true;\ + $(HELM_EXTRA_ARGS) e2e-subscriber-build-load-image: docker buildx build --platform="linux/amd64" -t ${FAKE_SUBSCRIBER_IMAGE} --load -f test/export/fake-subscriber/Dockerfile test/export/fake-subscriber @@ -302,7 +308,11 @@ e2e-subscriber-deploy: e2e-publisher-deploy: kubectl get secret redis --namespace=default -o yaml | sed 's/namespace: .*/namespace: gatekeeper-system/' | kubectl apply -f - - kubectl apply -f test/export/publish-components.yaml + kubectl apply -f test/export/fake-subscriber/manifest/publish-components.yaml + +e2e-reader-build-load-image: + docker buildx build --platform="linux/amd64" -t ${FAKE_READER_IMAGE} --load -f test/export/fake-reader/Dockerfile test/export/fake-reader + kind load docker-image --name kind ${FAKE_READER_IMAGE} # Build manager binary manager: generate @@ -327,6 +337,9 @@ deploy: patch-image manifests ifeq ($(ENABLE_GENERATOR_EXPANSION),true) @grep -q -v 'enable-generator-resource-expansion' 
./config/overlays/dev/manager_image_patch.yaml && sed -i '/- --operation=webhook/a \ \ \ \ \ \ \ \ - --enable-generator-resource-expansion=true' ./config/overlays/dev/manager_image_patch.yaml @grep -q -v 'enable-generator-resource-expansion' ./config/overlays/dev/manager_image_patch.yaml && sed -i '/- --operation=audit/a \ \ \ \ \ \ \ \ - --enable-generator-resource-expansion=true' ./config/overlays/dev/manager_image_patch.yaml +endif +ifeq ($(EXPORT_BACKEND),disk) + @bash -c 'echo -e ${MANAGER_SIDECAR_IMAGE_PATCH} >> ./config/overlays/dev/manager_image_patch.yaml' endif docker run \ -v $(shell pwd)/config:/config \ diff --git a/pkg/audit/manager.go b/pkg/audit/manager.go index d51b8bc73d0..5ae628c4e6d 100644 --- a/pkg/audit/manager.go +++ b/pkg/audit/manager.go @@ -21,6 +21,7 @@ import ( exportController "github.com/open-policy-agent/gatekeeper/v3/pkg/controller/export" "github.com/open-policy-agent/gatekeeper/v3/pkg/expansion" "github.com/open-policy-agent/gatekeeper/v3/pkg/export" + exportutil "github.com/open-policy-agent/gatekeeper/v3/pkg/export/util" "github.com/open-policy-agent/gatekeeper/v3/pkg/logging" mutationtypes "github.com/open-policy-agent/gatekeeper/v3/pkg/mutation/types" "github.com/open-policy-agent/gatekeeper/v3/pkg/target" @@ -106,28 +107,6 @@ type StatusViolation struct { EnforcementActions []string `json:"enforcementActions,omitempty"` } -// ExportMsg represents export message for each violation. 
-type ExportMsg struct { - ID string `json:"id,omitempty"` - Details interface{} `json:"details,omitempty"` - EventType string `json:"eventType,omitempty"` - Group string `json:"group,omitempty"` - Version string `json:"version,omitempty"` - Kind string `json:"kind,omitempty"` - Name string `json:"name,omitempty"` - Namespace string `json:"namespace,omitempty"` - Message string `json:"message,omitempty"` - EnforcementAction string `json:"enforcementAction,omitempty"` - EnforcementActions []string `json:"enforcementActions,omitempty"` - ConstraintAnnotations map[string]string `json:"constraintAnnotations,omitempty"` - ResourceGroup string `json:"resourceGroup,omitempty"` - ResourceAPIVersion string `json:"resourceAPIVersion,omitempty"` - ResourceKind string `json:"resourceKind,omitempty"` - ResourceNamespace string `json:"resourceNamespace,omitempty"` - ResourceName string `json:"resourceName,omitempty"` - ResourceLabels map[string]string `json:"resourceLabels,omitempty"` -} - // A max PriorityQueue implements heap.Interface and holds StatusViolation. 
type SVQueue []*StatusViolation @@ -280,17 +259,28 @@ func (am *Manager) audit(ctx context.Context) error { timestamp := startTime.UTC().Format(time.RFC3339) am.log = log.WithValues(logging.AuditID, timestamp) logStart(am.log) + exportErrorMap := make(map[string]error) + if err := am.exportSystem.Publish(context.Background(), *auditConnection, *auditChannel, exportutil.ExportMsg{Message: "audit is started", ID: timestamp}); err != nil { + exportErrorMap[err.Error()] = err + am.log.Error(err, "failed to export audit start message") + } // record audit latency defer func() { - logFinish(am.log) endTime := time.Now() latency := endTime.Sub(startTime) + logFinish(am.log, latency) if err := am.reporter.reportLatency(latency); err != nil { am.log.Error(err, "failed to report latency") } if err := am.reporter.reportRunEnd(endTime); err != nil { am.log.Error(err, "failed to report run end time") } + if err := am.exportSystem.Publish(context.Background(), *auditConnection, *auditChannel, exportutil.ExportMsg{Message: "audit is completed", ID: timestamp}); err != nil { + exportErrorMap[err.Error()] = err + } + for _, v := range exportErrorMap { + am.log.Error(v, "failed to export audit violation") + } }() if err := am.reporter.reportRunStart(startTime); err != nil { @@ -334,10 +324,10 @@ func (am *Manager) audit(ctx context.Context) error { am.log.Error(err, "Auditing") } - am.addAuditResponsesToUpdateLists(updateLists, res, totalViolationsPerConstraint, totalViolationsPerEnforcementAction, timestamp) + am.addAuditResponsesToUpdateLists(updateLists, res, totalViolationsPerConstraint, totalViolationsPerEnforcementAction, timestamp, exportErrorMap) } else { am.log.Info("Auditing via discovery client") - err := am.auditResources(ctx, constraintsGVKs, updateLists, totalViolationsPerConstraint, totalViolationsPerEnforcementAction, timestamp) + err := am.auditResources(ctx, constraintsGVKs, updateLists, totalViolationsPerConstraint, totalViolationsPerEnforcementAction, timestamp, 
exportErrorMap) if err != nil { return err } @@ -371,6 +361,7 @@ func (am *Manager) auditResources( totalViolationsPerConstraint map[util.KindVersionName]int64, totalViolationsPerEnforcementAction map[util.EnforcementAction]int64, timestamp string, + exportErrorMap map[string]error, ) error { // delete all from cache dir before starting audit err := am.removeAllFromDir(*apiCacheDir, *auditChunkSize) @@ -558,7 +549,7 @@ func (am *Manager) auditResources( } // Loop through all subDirs to review all files for this kind. am.log.V(logging.DebugLevel).Info("Reviewing objects for GVK", "group", gv.Group, "version", gv.Version, "kind", kind) - err = am.reviewObjects(ctx, kind, folderCount, namespaceCache, updateLists, totalViolationsPerConstraint, totalViolationsPerEnforcementAction, timestamp) + err = am.reviewObjects(ctx, kind, folderCount, namespaceCache, updateLists, totalViolationsPerConstraint, totalViolationsPerEnforcementAction, timestamp, exportErrorMap) if err != nil { errs = append(errs, err) continue @@ -660,6 +651,7 @@ func (am *Manager) reviewObjects(ctx context.Context, kind string, folderCount i totalViolationsPerConstraint map[util.KindVersionName]int64, totalViolationsPerEnforcementAction map[util.EnforcementAction]int64, timestamp string, + exportErrorMap map[string]error, ) error { for i := 0; i < folderCount; i++ { // cache directory structure: @@ -744,7 +736,7 @@ func (am *Manager) reviewObjects(ctx context.Context, kind string, folderCount i if len(resp.Results()) > 0 { results := ToResults(&augmentedObj.Object, resp) - am.addAuditResponsesToUpdateLists(updateLists, results, totalViolationsPerConstraint, totalViolationsPerEnforcementAction, timestamp) + am.addAuditResponsesToUpdateLists(updateLists, results, totalViolationsPerConstraint, totalViolationsPerEnforcementAction, timestamp, exportErrorMap) } } } @@ -864,6 +856,7 @@ func (am *Manager) addAuditResponsesToUpdateLists( totalViolationsPerConstraint map[util.KindVersionName]int64, 
totalViolationsPerEnforcementAction map[util.EnforcementAction]int64, timestamp string, + exportErrorMap map[string]error, ) { for _, r := range res { constraint := r.Constraint @@ -905,7 +898,7 @@ func (am *Manager) addAuditResponsesToUpdateLists( if *exportController.ExportEnabled { err := am.exportSystem.Publish(context.Background(), *auditConnection, *auditChannel, violationMsg(constraint, ea, r.ScopedEnforcementActions, gvk, namespace, name, msg, details, labels, timestamp)) if err != nil { - am.log.Error(err, "error exporting audit violation") + exportErrorMap[err.Error()] = err } } if *emitAuditEvents { @@ -1136,10 +1129,11 @@ func logStart(l logr.Logger) { ) } -func logFinish(l logr.Logger) { +func logFinish(l logr.Logger, t time.Duration) { l.Info( "auditing is complete", logging.EventType, "audit_finished", + "duration", t.String(), ) } @@ -1162,7 +1156,7 @@ func violationMsg(constraint *unstructured.Unstructured, enforcementAction util. userConstraintAnnotations := constraint.GetAnnotations() delete(userConstraintAnnotations, "kubectl.kubernetes.io/last-applied-configuration") - return ExportMsg{ + return exportutil.ExportMsg{ Message: message, Details: details, ID: timestamp, diff --git a/pkg/export/disk/diskwriter.go b/pkg/export/disk/diskwriter.go new file mode 100644 index 00000000000..1fbb2a0c6d4 --- /dev/null +++ b/pkg/export/disk/diskwriter.go @@ -0,0 +1,271 @@ +package disk + +import ( + "context" + "encoding/json" + "fmt" + "os" + "path" + "path/filepath" + "strings" + "syscall" + "time" + + "github.com/open-policy-agent/gatekeeper/v3/pkg/export/util" + "k8s.io/client-go/util/retry" +) + +type Connection struct { + // path to store audit logs + Path string `json:"path,omitempty"` + // max number of audit results to store + MaxAuditResults int `json:"maxAuditResults,omitempty"` + // File to write audit logs + File *os.File + + // current audit run file name + currentAuditRun string +} + +type Writer struct { + openConnections 
map[string]Connection +} + +const ( + maxAllowedAuditRuns = 5 +) + +const ( + Name = "diskwriter" +) + +var Connections = &Writer{ + openConnections: make(map[string]Connection), +} + +func (r *Writer) CreateConnection(_ context.Context, connectionName string, config interface{}) error { + cfg, ok := config.(map[string]interface{}) + if !ok { + return fmt.Errorf("invalid config format") + } + + path, pathOk := cfg["path"].(string) + if !pathOk { + return fmt.Errorf("missing or invalid values in config for connection: %s", connectionName) + } + var err error + maxResults, maxResultsOk := cfg["maxAuditResults"].(float64) + if !maxResultsOk { + return fmt.Errorf("missing or invalid 'maxAuditResults' for connection: %s", connectionName) + } + if maxResults > maxAllowedAuditRuns { + return fmt.Errorf("maxAuditResults cannot be greater than %d", maxAllowedAuditRuns) + } + + r.openConnections[connectionName] = Connection{ + Path: path, + MaxAuditResults: int(maxResults), + } + return err +} + +func (r *Writer) UpdateConnection(_ context.Context, connectionName string, config interface{}) error { + cfg, ok := config.(map[string]interface{}) + if !ok { + return fmt.Errorf("invalid config format") + } + + conn, exists := r.openConnections[connectionName] + if !exists { + return fmt.Errorf("connection not found: %s for Disk driver", connectionName) + } + + var err error + if path, ok := cfg["path"].(string); ok { + if conn.Path != path { + if err := os.RemoveAll(conn.Path); err != nil { + err = fmt.Errorf("connection updated but failed to remove content form old path: %w", err) + } + conn.Path = path + } + } else { + return fmt.Errorf("missing or invalid 'path' for connection: %s", connectionName) + } + + if maxResults, ok := cfg["maxAuditResults"].(float64); ok { + if maxResults > maxAllowedAuditRuns { + return fmt.Errorf("maxAuditResults cannot be greater than %d", maxAllowedAuditRuns) + } + conn.MaxAuditResults = int(maxResults) + } else { + return fmt.Errorf("missing or 
invalid 'maxAuditResults' for connection: %s", connectionName) + } + + r.openConnections[connectionName] = conn + return err +} + +func (r *Writer) CloseConnection(connectionName string) error { + conn, ok := r.openConnections[connectionName] + if !ok { + return fmt.Errorf("connection not found: %s for disk driver", connectionName) + } + err := os.RemoveAll(conn.Path) + delete(r.openConnections, connectionName) + return err +} + +func (r *Writer) Publish(_ context.Context, connectionName string, data interface{}, topic string) error { + conn, ok := r.openConnections[connectionName] + if !ok { + return fmt.Errorf("connection not found: %s for disk driver", connectionName) + } + + var violation util.ExportMsg + if violation, ok = data.(util.ExportMsg); !ok { + return fmt.Errorf("invalid data type, cannot convert data to exportMsg") + } + + if violation.Message == "audit is started" { + err := conn.handleAuditStart(violation.ID, topic) + if err != nil { + return fmt.Errorf("error handling audit start: %w", err) + } + r.openConnections[connectionName] = conn + } + + jsonData, err := json.Marshal(data) + if err != nil { + return fmt.Errorf("error marshaling data: %w", err) + } + + if conn.File == nil { + return fmt.Errorf("no file to write the violation in") + } + + _, err = conn.File.WriteString(string(jsonData) + "\n") + if err != nil { + return fmt.Errorf("error writing message to disk: %w", err) + } + + if violation.Message == "audit is completed" { + err := conn.handleAuditEnd(topic) + if err != nil { + return fmt.Errorf("error handling audit end: %w", err) + } + conn.File = nil + conn.currentAuditRun = "" + r.openConnections[connectionName] = conn + } + return nil +} + +func (conn *Connection) handleAuditStart(auditID string, topic string) error { + conn.currentAuditRun = strings.ReplaceAll(auditID, ":", "_") + + // Ensure the directory exists + dir := path.Join(conn.Path, topic) + if err := os.MkdirAll(dir, 0o755); err != nil { + return fmt.Errorf("failed to 
create directories: %w", err) + } + + file, err := os.OpenFile(path.Join(dir, appendExtension(conn.currentAuditRun, "txt")), os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o644) + if err != nil { + return fmt.Errorf("failed to open file: %w", err) + } + conn.File = file + err = retry.OnError(retry.DefaultBackoff, func(_ error) bool { + return true + }, func() error { + return syscall.Flock(int(conn.File.Fd()), syscall.LOCK_EX) + }) + if err != nil { + return fmt.Errorf("failed to acquire lock: %w", err) + } + return nil +} + +func (conn *Connection) handleAuditEnd(topic string) error { + if err := retry.OnError(retry.DefaultBackoff, func(_ error) bool { + return true + }, conn.unlockAndCloseFile); err != nil { + return fmt.Errorf("error closing file: %w, %s", err, conn.currentAuditRun) + } + conn.File = nil + + readyFilePath := path.Join(conn.Path, topic, appendExtension(conn.currentAuditRun, "log")) + if err := os.Rename(path.Join(conn.Path, topic, appendExtension(conn.currentAuditRun, "txt")), readyFilePath); err != nil { + return fmt.Errorf("failed to rename file: %w, %s", err, conn.currentAuditRun) + } + + return conn.cleanupOldAuditFiles(topic) +} + +func (conn *Connection) unlockAndCloseFile() error { + if conn.File == nil { + return fmt.Errorf("no file to close") + } + fd := int(conn.File.Fd()) + if fd < 0 { + return fmt.Errorf("invalid file descriptor") + } + if err := syscall.Flock(fd, syscall.LOCK_UN); err != nil { + return fmt.Errorf("failed to release lock: %w", err) + } + if err := conn.File.Close(); err != nil { + return fmt.Errorf("failed to close file: %w", err) + } + return nil +} + +func (conn *Connection) cleanupOldAuditFiles(topic string) error { + dirPath := path.Join(conn.Path, topic) + + for { + earliestFile, files, err := getEarliestFile(dirPath) + if err != nil { + return fmt.Errorf("error getting earliest file: %w", err) + } + if len(files) <= conn.MaxAuditResults { + break + } + if err := os.Remove(earliestFile); err != nil { + return 
fmt.Errorf("error removing file: %w", err) + } + } + + return nil +} + +func getEarliestFile(dirPath string) (string, []string, error) { + var earliestFile string + var earliestModTime time.Time + var files []string + + err := filepath.Walk(dirPath, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.IsDir() && (earliestFile == "" || info.ModTime().Before(earliestModTime)) { + earliestFile = path + earliestModTime = info.ModTime() + } + if !info.IsDir() { + files = append(files, path) + } + return nil + }) + if err != nil { + return "", files, err + } + + if earliestFile == "" { + return "", files, nil + } + + return earliestFile, files, nil +} + +func appendExtension(name string, ext string) string { + return name + "." + ext +} diff --git a/pkg/export/disk/diskwriter_test.go b/pkg/export/disk/diskwriter_test.go new file mode 100644 index 00000000000..9fae46a8c28 --- /dev/null +++ b/pkg/export/disk/diskwriter_test.go @@ -0,0 +1,812 @@ +package disk + +import ( + "context" + "fmt" + "os" + "path" + "path/filepath" + "slices" + "strings" + "syscall" + "testing" + + "github.com/open-policy-agent/gatekeeper/v3/pkg/export/util" +) + +func TestCreateConnection(t *testing.T) { + writer := &Writer{ + openConnections: make(map[string]Connection), + } + + tests := []struct { + name string + connectionName string + config interface{} + expectError bool + }{ + { + name: "Valid config", + connectionName: "conn1", + config: map[string]interface{}{ + "path": "/tmp/audit", + "maxAuditResults": 3.0, + }, + expectError: false, + }, + { + name: "Invalid config format", + connectionName: "conn2", + config: "invalid config", + expectError: true, + }, + { + name: "Missing path", + connectionName: "conn3", + config: map[string]interface{}{ + "maxAuditResults": 10.0, + }, + expectError: true, + }, + { + name: "Missing maxAuditResults", + connectionName: "conn4", + config: map[string]interface{}{ + "path": "/tmp/audit", + }, + expectError: 
true, + }, + { + name: "Exceeding maxAuditResults", + connectionName: "conn4", + config: map[string]interface{}{ + "path": "/tmp/audit", + "maxAuditResults": 10.0, + }, + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := writer.CreateConnection(context.Background(), tt.connectionName, tt.config) + if (err != nil) != tt.expectError { + t.Errorf("CreateConnection() error = %v, expectError %v", err, tt.expectError) + } + if !tt.expectError { + conn, exists := writer.openConnections[tt.connectionName] + if !exists { + t.Errorf("Connection %s was not created", tt.connectionName) + } + path, pathOk := tt.config.(map[string]interface{})["path"].(string) + if !pathOk { + t.Fatalf("Failed to get path from config") + } + if conn.Path != path { + t.Errorf("Expected path %s, got %s", path, conn.Path) + } + maxAuditResults, maxResultsOk := tt.config.(map[string]interface{})["maxAuditResults"].(float64) + if !maxResultsOk { + t.Fatalf("Failed to get maxAuditResults from config") + } + if conn.MaxAuditResults != int(maxAuditResults) { + t.Errorf("Expected maxAuditResults %d, got %d", int(maxAuditResults), conn.MaxAuditResults) + } + } + }) + } +} + +func TestUpdateConnection(t *testing.T) { + writer := &Writer{ + openConnections: make(map[string]Connection), + } + + // Pre-create a connection to update + writer.openConnections["conn1"] = Connection{ + Path: "/tmp/audit", + MaxAuditResults: 3, + } + + tests := []struct { + name string + connectionName string + config interface{} + expectError bool + }{ + { + name: "Valid update", + connectionName: "conn1", + config: map[string]interface{}{ + "path": "/tmp/audit_updated", + "maxAuditResults": 4.0, + }, + expectError: false, + }, + { + name: "Invalid config format", + connectionName: "conn1", + config: "invalid config", + expectError: true, + }, + { + name: "Connection not found", + connectionName: "conn2", + config: map[string]interface{}{ + "path": "/tmp/audit", + 
"maxAuditResults": 2.0, + }, + expectError: true, + }, + { + name: "Missing path", + connectionName: "conn1", + config: map[string]interface{}{ + "maxAuditResults": 2.0, + }, + expectError: true, + }, + { + name: "Missing maxAuditResults", + connectionName: "conn1", + config: map[string]interface{}{ + "path": "/tmp/audit", + }, + expectError: true, + }, + { + name: "Exceeding maxAuditResults", + connectionName: "conn1", + config: map[string]interface{}{ + "path": "/tmp/audit", + "maxAuditResults": 10.0, + }, + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := writer.UpdateConnection(context.Background(), tt.connectionName, tt.config) + if (err != nil) != tt.expectError { + t.Errorf("UpdateConnection() error = %v, expectError %v", err, tt.expectError) + } + if !tt.expectError { + conn, exists := writer.openConnections[tt.connectionName] + if !exists { + t.Errorf("Connection %s was not found", tt.connectionName) + } + path, pathOk := tt.config.(map[string]interface{})["path"].(string) + if !pathOk { + t.Fatalf("Failed to get path from config") + } + if conn.Path != path { + t.Errorf("Expected path %s, got %s", path, conn.Path) + } + maxAuditResults, maxResultsOk := tt.config.(map[string]interface{})["maxAuditResults"].(float64) + if !maxResultsOk { + t.Fatalf("Failed to get maxAuditResults from config") + } + if conn.MaxAuditResults != int(maxAuditResults) { + t.Errorf("Expected maxAuditResults %d, got %d", int(maxAuditResults), conn.MaxAuditResults) + } + } + }) + } +} + +func TestCloseConnection(t *testing.T) { + // Add to check clean up + writer := &Writer{ + openConnections: make(map[string]Connection), + } + + // Pre-create a connection to close + writer.openConnections["conn1"] = Connection{ + Path: "/tmp/audit", + MaxAuditResults: 10, + } + + tests := []struct { + name string + connectionName string + expectError bool + }{ + { + name: "Valid close", + connectionName: "conn1", + expectError: false, + }, + 
{ + name: "Connection not found", + connectionName: "conn2", + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := writer.CloseConnection(tt.connectionName) + if (err != nil) != tt.expectError { + t.Errorf("CloseConnection() error = %v, expectError %v", err, tt.expectError) + } + if !tt.expectError { + _, exists := writer.openConnections[tt.connectionName] + if exists { + t.Errorf("Connection %s was not closed", tt.connectionName) + } + } + }) + } +} + +func TestPublish(t *testing.T) { + writer := &Writer{ + openConnections: make(map[string]Connection), + } + + // Pre-create a connection to publish to + writer.openConnections["conn1"] = Connection{ + Path: "/tmp/audit", + MaxAuditResults: 1, + } + + tests := []struct { + name string + connectionName string + data interface{} + topic string + expectError bool + }{ + { + name: "Valid publish - audit started", + connectionName: "conn1", + data: util.ExportMsg{ + ID: "audit1", + Message: "audit is started", + }, + topic: "topic1", + expectError: false, + }, + { + name: "Valid publish - audit in progress", + connectionName: "conn1", + data: util.ExportMsg{ + ID: "audit1", + Message: "audit is in progress", + }, + topic: "topic1", + expectError: false, + }, + { + name: "Valid publish - audit completed", + connectionName: "conn1", + data: util.ExportMsg{ + ID: "audit1", + Message: "audit is completed", + }, + topic: "topic1", + expectError: false, + }, + { + name: "Invalid data type", + connectionName: "conn1", + data: "invalid data", + topic: "topic1", + expectError: true, + }, + { + name: "Connection not found", + connectionName: "conn2", + data: util.ExportMsg{ + ID: "audit1", + Message: "audit is started", + }, + topic: "topic1", + expectError: true, + }, + { + name: "Valid publish - 2nd audit started", + connectionName: "conn1", + data: util.ExportMsg{ + ID: "audit2", + Message: "audit is started", + }, + topic: "topic1", + expectError: false, + }, + { + name: 
"Valid publish - 2nd audit in progress", + connectionName: "conn1", + data: util.ExportMsg{ + ID: "audit2", + Message: "audit is in progress", + }, + topic: "topic1", + expectError: false, + }, + { + name: "Valid publish - 2nd audit completed", + connectionName: "conn1", + data: util.ExportMsg{ + ID: "audit2", + Message: "audit is completed", + }, + topic: "topic1", + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := writer.Publish(context.Background(), tt.connectionName, tt.data, tt.topic) + if (err != nil) != tt.expectError { + t.Errorf("Publish() error = %v, expectError %v", err, tt.expectError) + } + if !tt.expectError { + files, err := listFiles(path.Join(writer.openConnections[tt.connectionName].Path, tt.topic)) + if err != nil { + t.Fatalf("Failed to list files: %v", err) + } + msg, ok := tt.data.(util.ExportMsg) + if !ok { + t.Fatalf("Failed to convert data to ExportMsg") + } + if msg.Message == "audit is started" { + if len(files) > 2 { + t.Errorf("Expected <= 2 file, got %d, %v", len(files), files) + } + if slices.Contains(files, writer.openConnections[tt.connectionName].currentAuditRun+".txt") { + t.Errorf("Expected file %s to exist, but it does not", writer.openConnections[tt.connectionName].currentAuditRun+".txt") + } + } + if msg.Message == "audit is completed" { + if len(files) != 1 { + t.Errorf("Expected 1 file, got %d, %v", len(files), files) + } + if slices.Contains(files, msg.ID+".log") { + t.Errorf("Expected file %s to exist, but it does not, files: %v", msg.ID+".log", files) + } + content, err := os.ReadFile(files[0]) + if err != nil { + t.Fatalf("Failed to read file: %v", err) + } + for _, msg := range []string{"audit is started", "audit is in progress", "audit is completed"} { + if !strings.Contains(string(content), msg) { + t.Errorf("Expected message %q in file %s, but it was not found", msg, files[0]) + } + } + } + } + }) + } + + err := os.RemoveAll("/tmp/audit") + if err != nil { + 
t.Fatalf("Failed to clean up: %v", err) + } +} + +func TestHandleAuditStart(t *testing.T) { + tests := []struct { + name string + connection Connection + auditID string + topic string + expectError bool + }{ + { + name: "Valid audit start", + connection: Connection{ + Path: "/tmp/audit", + }, + auditID: "audit1", + topic: "topic1", + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.connection.handleAuditStart(tt.auditID, tt.topic) + if (err != nil) != tt.expectError { + t.Errorf("handleAuditStart() error = %v, expectError %v", err, tt.expectError) + } + if !tt.expectError { + expectedFileName := path.Join(tt.connection.Path, tt.topic, tt.auditID+".txt") + if tt.connection.currentAuditRun != tt.auditID { + t.Errorf("Expected currentAuditRun %s, got %s", tt.auditID, tt.connection.currentAuditRun) + } + if tt.connection.File == nil { + t.Errorf("Expected file to be opened, but it is nil") + } else { + if tt.connection.File.Name() != expectedFileName { + t.Errorf("Expected file name %s, got %s", expectedFileName, tt.connection.File.Name()) + } + tt.connection.File.Close() + } + } + }) + } + + err := os.RemoveAll("/tmp/audit") + if err != nil { + t.Fatalf("Failed to clean up: %v", err) + } +} + +func TestHandleAuditEnd(t *testing.T) { + tests := []struct { + name string + connection Connection + topic string + setup func(conn *Connection) error + expectError bool + expectedFile string + }{ + { + name: "Valid audit end", + connection: Connection{ + Path: "/tmp/audit", + currentAuditRun: "audit1", + }, + topic: "topic1", + setup: func(conn *Connection) error { + dir := path.Join(conn.Path, "topic1") + if err := os.MkdirAll(dir, 0o755); err != nil { + return err + } + file, err := os.Create(path.Join(dir, conn.currentAuditRun+".txt")) + if err != nil { + return err + } + conn.File = file + return nil + }, + expectError: false, + }, + { + name: "Cleanup old audit files error", + connection: Connection{ + Path: 
"/tmp/audit", + currentAuditRun: "audit1", + MaxAuditResults: 1, + }, + topic: "topic1", + setup: func(conn *Connection) error { + // Create an extra file to trigger cleanup + dir := path.Join(conn.Path, "topic1") + if err := os.MkdirAll(dir, 0o755); err != nil { + return err + } + if _, err := os.Create(path.Join(dir, "extra_audit.log")); err != nil { + return err + } + file, err := os.Create(path.Join(dir, conn.currentAuditRun+".txt")) + if err != nil { + return err + } + conn.File = file + return nil + }, + expectError: false, + expectedFile: "audit1.log", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.setup != nil { + if err := tt.setup(&tt.connection); err != nil { + t.Fatalf("Setup failed: %v", err) + } + } + err := tt.connection.handleAuditEnd(tt.topic) + if (err != nil) != tt.expectError { + t.Errorf("handleAuditEnd() error = %v, expectError %v", err, tt.expectError) + } + + if !tt.expectError { + files, err := listFiles(path.Join(tt.connection.Path, tt.topic)) + if err != nil { + t.Fatalf("Failed to list files: %v", err) + } + if slices.Contains(files, tt.expectedFile) { + t.Errorf("Expected file %s to exist, but it does not. 
Files: %v", tt.expectedFile, files) + } + } + }) + } + + err := os.RemoveAll("/tmp/audit") + if err != nil { + t.Fatalf("Failed to clean up: %v", err) + } +} + +func listFiles(dir string) ([]string, error) { + var files []string + + err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.IsDir() { + files = append(files, path) + } + return nil + }) + + return files, err +} + +func TestUnlockAndCloseFile(t *testing.T) { + tests := []struct { + name string + connection Connection + setup func(conn *Connection) error + expectError bool + }{ + { + name: "Valid unlock and close", + connection: Connection{ + Path: "/tmp/audit", + }, + setup: func(conn *Connection) error { + if err := os.MkdirAll(conn.Path, 0o755); err != nil { + return err + } + file, err := os.CreateTemp(conn.Path, "testfile") + if err != nil { + return err + } + conn.File = file + return syscall.Flock(int(file.Fd()), syscall.LOCK_EX) + }, + expectError: false, + }, + { + name: "No file to close", + connection: Connection{ + Path: "/tmp/audit", + }, + setup: nil, + expectError: true, + }, + { + name: "Invalid file descriptor", + connection: Connection{ + Path: "/tmp/audit", + }, + setup: func(conn *Connection) error { + file, err := os.CreateTemp(conn.Path, "testfile") + if err != nil { + return err + } + conn.File = file + file.Close() // Close the file to make the descriptor invalid + return nil + }, + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.setup != nil { + if err := tt.setup(&tt.connection); err != nil { + t.Fatalf("Setup failed: %v", err) + } + } + err := tt.connection.unlockAndCloseFile() + if (err != nil) != tt.expectError { + t.Errorf("unlockAndCloseFile() error = %v, expectError %v", err, tt.expectError) + } + }) + } + + err := os.RemoveAll("/tmp/audit") + if err != nil { + t.Fatalf("Failed to clean up: %v", err) + } +} + +func TestCleanupOldAuditFiles(t 
*testing.T) { + tests := []struct { + name string + connection Connection + topic string + setup func(conn *Connection) error + expectError bool + expectedFiles int + }{ + { + name: "No files to clean up", + connection: Connection{ + Path: "/tmp/audit", + MaxAuditResults: 5, + }, + topic: "topic1", + setup: func(conn *Connection) error { + return os.MkdirAll(path.Join(conn.Path, "topic1"), 0o755) + }, + expectError: false, + expectedFiles: 0, + }, + { + name: "Files within limit", + connection: Connection{ + Path: "/tmp/audit", + MaxAuditResults: 5, + }, + topic: "topic1", + setup: func(conn *Connection) error { + dir := path.Join(conn.Path, "topic1") + if err := os.MkdirAll(dir, 0o755); err != nil { + return err + } + for i := 0; i < 3; i++ { + if _, err := os.Create(path.Join(dir, fmt.Sprintf("audit%d.txt", i))); err != nil { + return err + } + } + return nil + }, + expectError: false, + expectedFiles: 3, + }, + { + name: "Files exceeding limit", + connection: Connection{ + Path: "/tmp/audit", + MaxAuditResults: 2, + }, + topic: "topic1", + setup: func(conn *Connection) error { + dir := path.Join(conn.Path, "topic1") + if err := os.MkdirAll(dir, 0o755); err != nil { + return err + } + for i := 0; i < 4; i++ { + if _, err := os.Create(path.Join(dir, fmt.Sprintf("audit%d.txt", i))); err != nil { + return err + } + } + return nil + }, + expectError: false, + expectedFiles: 2, + }, + { + name: "Error getting earliest file", + connection: Connection{ + Path: "/invalid/path", + MaxAuditResults: 2, + }, + topic: "topic1", + setup: nil, + expectError: true, + expectedFiles: 0, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.setup != nil { + if err := tt.setup(&tt.connection); err != nil { + t.Fatalf("Setup failed: %v", err) + } + } + err := tt.connection.cleanupOldAuditFiles(tt.topic) + if (err != nil) != tt.expectError { + t.Errorf("cleanupOldAuditFiles() error = %v, expectError %v", err, tt.expectError) + } + if !tt.expectError { + 
dir := path.Join(tt.connection.Path, tt.topic) + files, err := os.ReadDir(dir) + if err != nil { + t.Fatalf("Failed to read directory: %v", err) + } + if len(files) != tt.expectedFiles { + t.Errorf("Expected %d files, got %d", tt.expectedFiles, len(files)) + } + } + }) + } + err := os.RemoveAll("/tmp/audit") + if err != nil { + t.Fatalf("Failed to clean up: %v", err) + } +} + +func TestGetEarliestFile(t *testing.T) { + tests := []struct { + name string + setup func(dir string) error + expectedFile string + expectedFiles int + expectError bool + }{ + { + name: "No files in directory", + setup: func(_ string) error { + return nil + }, + expectedFile: "", + expectedFiles: 0, + expectError: false, + }, + { + name: "Single file in directory", + setup: func(dir string) error { + _, err := os.Create(path.Join(dir, "file1.txt")) + return err + }, + expectedFile: "file1.txt", + expectedFiles: 1, + expectError: false, + }, + { + name: "Multiple files in directory", + setup: func(dir string) error { + for i := 1; i <= 3; i++ { + if _, err := os.Create(path.Join(dir, fmt.Sprintf("file%d.txt", i))); err != nil { + return err + } + } + return nil + }, + expectedFile: "file1.txt", + expectedFiles: 3, + expectError: false, + }, + { + name: "Nested directories", + setup: func(dir string) error { + subDir := path.Join(dir, "subdir") + if err := os.Mkdir(subDir, 0o755); err != nil { + return err + } + if _, err := os.Create(path.Join(subDir, "file1.txt")); err != nil { + return err + } + return nil + }, + expectedFile: "subdir/file1.txt", + expectedFiles: 1, + expectError: false, + }, + { + name: "Error walking directory", + setup: func(dir string) error { + return os.Chmod(dir, 0o000) + }, + expectedFile: "", + expectedFiles: 0, + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + dir := t.TempDir() + if tt.setup != nil { + if err := tt.setup(dir); err != nil { + t.Fatalf("Setup failed: %v", err) + } + } + earliestFile, files, err := 
getEarliestFile(dir) + if (err != nil) != tt.expectError { + t.Errorf("getEarliestFile() error = %v, expectError %v", err, tt.expectError) + } + if !tt.expectError { + if len(files) != tt.expectedFiles { + t.Errorf("Expected %d files, got %d", tt.expectedFiles, len(files)) + } + if tt.expectedFile != "" && !strings.HasSuffix(earliestFile, tt.expectedFile) { + t.Errorf("Expected earliest file %s, got %s", tt.expectedFile, earliestFile) + } + } + }) + } +} diff --git a/pkg/export/system.go b/pkg/export/system.go index d9863c35a65..74130e19961 100644 --- a/pkg/export/system.go +++ b/pkg/export/system.go @@ -6,11 +6,13 @@ import ( "sync" "github.com/open-policy-agent/gatekeeper/v3/pkg/export/dapr" + "github.com/open-policy-agent/gatekeeper/v3/pkg/export/disk" "github.com/open-policy-agent/gatekeeper/v3/pkg/export/driver" ) var SupportedDrivers = map[string]driver.Driver{ dapr.Name: dapr.Connections, + disk.Name: disk.Connections, } type System struct { diff --git a/pkg/export/util/util.go b/pkg/export/util/util.go new file mode 100644 index 00000000000..b6a0f4fd784 --- /dev/null +++ b/pkg/export/util/util.go @@ -0,0 +1,44 @@ +package util + +// ExportMsg represents export message for each violation. 
+type ExportMsg struct { + ID string `json:"id,omitempty"` + Details interface{} `json:"details,omitempty"` + EventType string `json:"eventType,omitempty"` + Group string `json:"group,omitempty"` + Version string `json:"version,omitempty"` + Kind string `json:"kind,omitempty"` + Name string `json:"name,omitempty"` + Namespace string `json:"namespace,omitempty"` + Message string `json:"message,omitempty"` + EnforcementAction string `json:"enforcementAction,omitempty"` + EnforcementActions []string `json:"enforcementActions,omitempty"` + ConstraintAnnotations map[string]string `json:"constraintAnnotations,omitempty"` + ResourceGroup string `json:"resourceGroup,omitempty"` + ResourceAPIVersion string `json:"resourceAPIVersion,omitempty"` + ResourceKind string `json:"resourceKind,omitempty"` + ResourceNamespace string `json:"resourceNamespace,omitempty"` + ResourceName string `json:"resourceName,omitempty"` + ResourceLabels map[string]string `json:"resourceLabels,omitempty"` +} + +type ExportErr struct { + Code ExportError `json:"code"` + Message string `json:"message"` +} + +func (e ExportErr) Error() string { + return e.Message +} + +type ExportError string + +const ( + ErrConnectionNotFound ExportError = "connection_not_found" + ErrInvalidDataType ExportError = "invalid_data_type" + ErrCreatingFile ExportError = "error_creating_file" + ErrFileDoesNotExist ExportError = "file_does_not_exist" + ErrMarshalingData ExportError = "error_marshaling_data" + ErrWritingMessage ExportError = "error_writing_message" + ErrCleaningUpAudit ExportError = "error_cleaning_up_audit" +) diff --git a/test/export/fake-reader/Dockerfile b/test/export/fake-reader/Dockerfile new file mode 100644 index 00000000000..bc7211fe69e --- /dev/null +++ b/test/export/fake-reader/Dockerfile @@ -0,0 +1,35 @@ +ARG BUILDPLATFORM="linux/amd64" +ARG BUILDERIMAGE="golang:1.22-bookworm" +ARG BASEIMAGE="gcr.io/distroless/static-debian12:nonroot" + +FROM --platform=$BUILDPLATFORM $BUILDERIMAGE as builder + 
+ARG TARGETPLATFORM +ARG TARGETOS +ARG TARGETARCH +ARG TARGETVARIANT="" +ARG LDFLAGS + +ENV GO111MODULE=on \ + CGO_ENABLED=0 \ + GOOS=${TARGETOS} \ + GOARCH=${TARGETARCH} \ + GOARM=${TARGETVARIANT} + +WORKDIR /go/src/github.com/open-policy-agent/gatekeeper/test/export/fake-reader + +COPY . . + +RUN go mod init && go mod tidy && go mod vendor + +RUN go build -o main + +FROM $BASEIMAGE + +WORKDIR / + +COPY --from=builder /go/src/github.com/open-policy-agent/gatekeeper/test/export/fake-reader/main . + +USER 65532:65532 + +ENTRYPOINT ["/main"] \ No newline at end of file diff --git a/test/export/fake-reader/export_config.yaml b/test/export/fake-reader/export_config.yaml new file mode 100644 index 00000000000..622ebf91da6 --- /dev/null +++ b/test/export/fake-reader/export_config.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: audit + namespace: gatekeeper-system +data: + driver: "diskwriter" + config: | + { + "path": "/tmp/violations", + "maxAuditResults": 3 + } diff --git a/test/export/fake-reader/main.go b/test/export/fake-reader/main.go new file mode 100644 index 00000000000..27cbfadfdf8 --- /dev/null +++ b/test/export/fake-reader/main.go @@ -0,0 +1,122 @@ +package main + +import ( + "bufio" + "fmt" + "log" + "os" + "path/filepath" + "strings" + "syscall" + "time" +) + +type PubsubMsg struct { + ID string `json:"id,omitempty"` + Details interface{} `json:"details,omitempty"` + EventType string `json:"eventType,omitempty"` + Group string `json:"group,omitempty"` + Version string `json:"version,omitempty"` + Kind string `json:"kind,omitempty"` + Name string `json:"name,omitempty"` + Namespace string `json:"namespace,omitempty"` + Message string `json:"message,omitempty"` + EnforcementAction string `json:"enforcementAction,omitempty"` + ConstraintAnnotations map[string]string `json:"constraintAnnotations,omitempty"` + ResourceGroup string `json:"resourceGroup,omitempty"` + ResourceAPIVersion string `json:"resourceAPIVersion,omitempty"` + 
ResourceKind string `json:"resourceKind,omitempty"` + ResourceNamespace string `json:"resourceNamespace,omitempty"` + ResourceName string `json:"resourceName,omitempty"` + ResourceLabels map[string]string `json:"resourceLabels,omitempty"` +} + +// Modifications for acturate simulation +// varify if violation exists for a constraint owned by policy and then post it - add sleep (2s) for a batch size of 2k violations +// hold 2k violations in variable - read from tmp-violations.txt +// hold tmp file for previous violations +// 2 files +// 1 - GK publish violations +// 1 - policy read violations. +func main() { + dirPath := "/tmp/violations/" + + for { + // Find the latest created file in dirPath + latestFile, files, err := getLatestFile(dirPath) + log.Printf("out of all files: %v, reading from just %s \n", files, latestFile) + if err != nil { + log.Printf("Error finding latest file: %v\n", err) + time.Sleep(5 * time.Second) + continue + } + // Open the file in read-write mode + file, err := os.OpenFile(latestFile, os.O_RDWR, 0o644) + if err != nil { + log.Printf("Error opening file: %v\n", err) + time.Sleep(5 * time.Second) + continue + } + + // Acquire an exclusive lock on the file + if err := syscall.Flock(int(file.Fd()), syscall.LOCK_EX); err != nil { + log.Fatalf("Error locking file: %v\n", err) + } + + // Read the file content + scanner := bufio.NewScanner(file) + var lines []string + for scanner.Scan() { + lines = append(lines, scanner.Text()) + } + + if err := scanner.Err(); err != nil { + log.Fatalf("Error reading file: %v\n", err) + } + + // Process the read content + for _, line := range lines { + log.Printf("Processed line: %s\n", line) + } + + // Release the lock + if err := syscall.Flock(int(file.Fd()), syscall.LOCK_UN); err != nil { + log.Fatalf("Error unlocking file: %v\n", err) + } + + // Close the file + if err := file.Close(); err != nil { + log.Fatalf("Error closing file: %v\n", err) + } + time.Sleep(90 * time.Second) + } +} + +func 
getLatestFile(dirPath string) (string, []string, error) { + var latestFile string + var latestModTime time.Time + var files []string + + err := filepath.Walk(dirPath, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.IsDir() && strings.Contains(path, ".log") && (latestFile == "" || info.ModTime().After(latestModTime)) { + latestFile = path + latestModTime = info.ModTime() + } + if !info.IsDir() { + files = append(files, path) + } + return nil + }) + if err != nil { + return "", files, err + } + + if latestFile == "" { + return "", files, fmt.Errorf("no files found in directory: %s", dirPath) + } + + return latestFile, files, nil +} diff --git a/test/export/publish-components.yaml b/test/export/fake-subscriber/manifest/publish-components.yaml similarity index 100% rename from test/export/publish-components.yaml rename to test/export/fake-subscriber/manifest/publish-components.yaml From 6178055cebd0a2f3899a5f100924a75002587e91 Mon Sep 17 00:00:00 2001 From: Jaydip Gabani Date: Sat, 1 Mar 2025 03:34:41 +0000 Subject: [PATCH 02/33] fixing lint Signed-off-by: Jaydip Gabani --- pkg/export/disk/diskwriter.go | 8 ++++---- pkg/export/disk/diskwriter_test.go | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/pkg/export/disk/diskwriter.go b/pkg/export/disk/diskwriter.go index 1fbb2a0c6d4..75e938f86fd 100644 --- a/pkg/export/disk/diskwriter.go +++ b/pkg/export/disk/diskwriter.go @@ -49,7 +49,7 @@ func (r *Writer) CreateConnection(_ context.Context, connectionName string, conf return fmt.Errorf("invalid config format") } - path, pathOk := cfg["path"].(string) + path, pathOk := cfg["path"].(string) if !pathOk { return fmt.Errorf("missing or invalid values in config for connection: %s", connectionName) } @@ -80,11 +80,11 @@ func (r *Writer) UpdateConnection(_ context.Context, connectionName string, conf return fmt.Errorf("connection not found: %s for Disk driver", connectionName) } - var err error + var 
cleanUpErr error if path, ok := cfg["path"].(string); ok { if conn.Path != path { if err := os.RemoveAll(conn.Path); err != nil { - err = fmt.Errorf("connection updated but failed to remove content form old path: %w", err) + cleanUpErr = fmt.Errorf("connection updated but failed to remove content form old path: %w", err) } conn.Path = path } @@ -102,7 +102,7 @@ func (r *Writer) UpdateConnection(_ context.Context, connectionName string, conf } r.openConnections[connectionName] = conn - return err + return cleanUpErr } func (r *Writer) CloseConnection(connectionName string) error { diff --git a/pkg/export/disk/diskwriter_test.go b/pkg/export/disk/diskwriter_test.go index 9fae46a8c28..0df72ce2740 100644 --- a/pkg/export/disk/diskwriter_test.go +++ b/pkg/export/disk/diskwriter_test.go @@ -52,7 +52,7 @@ func TestCreateConnection(t *testing.T) { name: "Missing maxAuditResults", connectionName: "conn4", config: map[string]interface{}{ - "path": "/tmp/audit", + "path": "/tmp/audit", }, expectError: true, }, @@ -60,7 +60,7 @@ func TestCreateConnection(t *testing.T) { name: "Exceeding maxAuditResults", connectionName: "conn4", config: map[string]interface{}{ - "path": "/tmp/audit", + "path": "/tmp/audit", "maxAuditResults": 10.0, }, expectError: true, @@ -150,7 +150,7 @@ func TestUpdateConnection(t *testing.T) { name: "Missing maxAuditResults", connectionName: "conn1", config: map[string]interface{}{ - "path": "/tmp/audit", + "path": "/tmp/audit", }, expectError: true, }, @@ -158,7 +158,7 @@ func TestUpdateConnection(t *testing.T) { name: "Exceeding maxAuditResults", connectionName: "conn1", config: map[string]interface{}{ - "path": "/tmp/audit", + "path": "/tmp/audit", "maxAuditResults": 10.0, }, expectError: true, From eeac8e8bbf130b37e3917c00df78b4b0451d13f7 Mon Sep 17 00:00:00 2001 From: Jaydip Gabani Date: Sat, 1 Mar 2025 06:04:36 +0000 Subject: [PATCH 03/33] fixing disk export workflow name Signed-off-by: Jaydip Gabani --- .github/workflows/disk-export.yaml | 6 ++++-- 
1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/disk-export.yaml b/.github/workflows/disk-export.yaml index 6e692a4401c..098868e5151 100644 --- a/.github/workflows/disk-export.yaml +++ b/.github/workflows/disk-export.yaml @@ -1,17 +1,19 @@ -name: dapr-export +name: disk-export on: push: paths: - "pkg/export/dapr" + - "pkg/export/disk" - "test/export/**" pull_request: paths: - "pkg/export/dapr" + - "pkg/export/disk" - "test/export/**" permissions: read-all jobs: - dapr_test: + disk_test: name: "Disk export test" runs-on: ubuntu-22.04 timeout-minutes: 15 From 83169e55e669c337dcad9a93c1347f61b396837c Mon Sep 17 00:00:00 2001 From: Jaydip Gabani Date: Sun, 2 Mar 2025 00:51:57 +0000 Subject: [PATCH 04/33] fixing disk export tests Signed-off-by: Jaydip Gabani --- .github/workflows/disk-export.yaml | 3 +-- test/bats/test.bats | 1 + 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/disk-export.yaml b/.github/workflows/disk-export.yaml index 098868e5151..5e45dab88d3 100644 --- a/.github/workflows/disk-export.yaml +++ b/.github/workflows/disk-export.yaml @@ -31,7 +31,6 @@ jobs: mkdir -p $GITHUB_WORKSPACE/bin echo "$GITHUB_WORKSPACE/bin" >> $GITHUB_PATH make e2e-bootstrap - make e2e-reader-build-load-image - name: Run e2e run: | @@ -39,6 +38,7 @@ jobs: make e2e-build-load-externaldata-image make docker-buildx-crds CRD_IMG=gatekeeper-crds:latest kind load docker-image --name kind gatekeeper-e2e:latest gatekeeper-crds:latest + make e2e-reader-build-load-image make deploy \ IMG=gatekeeper-e2e:latest \ USE_LOCAL_IMG=true \ @@ -47,7 +47,6 @@ jobs: EXPORT_BACKEND=disk kubectl apply -f test/export/fake-reader/export_config.yaml - make e2e-publisher-deploy make test-e2e ENABLE_EXPORT_TESTS=1 - name: Save logs diff --git a/test/bats/test.bats b/test/bats/test.bats index 1f2a29698a0..8b59e85fbea 100644 --- a/test/bats/test.bats +++ b/test/bats/test.bats @@ -16,6 +16,7 @@ teardown_file() { kubectl label ns 
${GATEKEEPER_NAMESPACE} admission.gatekeeper.sh/ignore=no-self-managing --overwrite || true kubectl delete ns \ gatekeeper-test-playground \ + gatekeeper-test-playground-scoped \ gatekeeper-excluded-namespace \ gatekeeper-excluded-prefix-match-namespace \ gatekeeper-excluded-suffix-match-namespace || true From 758396a4293a3080ad86f4cf499e6f8941c1d908 Mon Sep 17 00:00:00 2001 From: Jaydip Gabani Date: Sun, 2 Mar 2025 01:01:46 +0000 Subject: [PATCH 05/33] fixing disk-export test Signed-off-by: Jaydip Gabani --- .github/workflows/disk-export.yaml | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/.github/workflows/disk-export.yaml b/.github/workflows/disk-export.yaml index 5e45dab88d3..d173f3d02ee 100644 --- a/.github/workflows/disk-export.yaml +++ b/.github/workflows/disk-export.yaml @@ -34,11 +34,16 @@ jobs: - name: Run e2e run: | - make docker-buildx IMG=gatekeeper-e2e:latest + make docker-buildx \ + IMG=gatekeeper-e2e:latest + make e2e-build-load-externaldata-image - make docker-buildx-crds CRD_IMG=gatekeeper-crds:latest - kind load docker-image --name kind gatekeeper-e2e:latest gatekeeper-crds:latest + + kind load docker-image --name kind \ + gatekeeper-e2e:latest + make e2e-reader-build-load-image + make deploy \ IMG=gatekeeper-e2e:latest \ USE_LOCAL_IMG=true \ From c73ed2bdc094b4469db0316393be0d3445d2063e Mon Sep 17 00:00:00 2001 From: Jaydip Gabani Date: Sun, 2 Mar 2025 01:14:47 +0000 Subject: [PATCH 06/33] fixing disk-export test Signed-off-by: Jaydip Gabani --- .github/workflows/disk-export.yaml | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/.github/workflows/disk-export.yaml b/.github/workflows/disk-export.yaml index d173f3d02ee..7f7184d1d54 100644 --- a/.github/workflows/disk-export.yaml +++ b/.github/workflows/disk-export.yaml @@ -44,12 +44,7 @@ jobs: make e2e-reader-build-load-image - make deploy \ - IMG=gatekeeper-e2e:latest \ - USE_LOCAL_IMG=true \ - GENERATE_VAP=true \ - GENERATE_VAPBINDING=true \ - 
EXPORT_BACKEND=disk + make deploy IMG=gatekeeper-e2e:latest USE_LOCAL_IMG=true GENERATE_VAP=true GENERATE_VAPBINDING=true EXPORT_BACKEND=disk kubectl apply -f test/export/fake-reader/export_config.yaml make test-e2e ENABLE_EXPORT_TESTS=1 From 1f1c1ea3cc7aecb6d5a46691121a9f92c0c03e94 Mon Sep 17 00:00:00 2001 From: Jaydip Gabani Date: Sun, 2 Mar 2025 04:35:14 +0000 Subject: [PATCH 07/33] fixing disk-export test Signed-off-by: Jaydip Gabani --- Makefile | 1 - 1 file changed, 1 deletion(-) diff --git a/Makefile b/Makefile index 1304a992e4e..a781c8258ea 100644 --- a/Makefile +++ b/Makefile @@ -134,7 +134,6 @@ MANAGER_IMAGE_PATCH := "apiVersion: apps/v1\ \n" MANAGER_SIDECAR_IMAGE_PATCH := "\n - --enable-violation-export=true\ -\n - --constraint-violations-limit=0\ \n - --audit-connection=audit\ \n - --audit-channel=audit\ \n volumeMounts:\ From f0b06fe34bad48e0a2421dc58d0f9e955ec6b0a1 Mon Sep 17 00:00:00 2001 From: Jaydip Gabani Date: Wed, 5 Mar 2025 00:47:10 +0000 Subject: [PATCH 08/33] adding docs for disk driver Signed-off-by: Jaydip Gabani --- Makefile | 2 +- pkg/export/disk/{diskwriter.go => disk.go} | 2 +- .../disk/{diskwriter_test.go => disk_test.go} | 0 test/export/fake-reader/export_config.yaml | 2 +- test/export/fake-reader/main.go | 2 +- website/docs/export.md | 125 +++++++++++++++++- 6 files changed, 125 insertions(+), 8 deletions(-) rename pkg/export/disk/{diskwriter.go => disk.go} (99%) rename pkg/export/disk/{diskwriter_test.go => disk_test.go} (100%) diff --git a/Makefile b/Makefile index a781c8258ea..c3fd45ba315 100644 --- a/Makefile +++ b/Makefile @@ -140,7 +140,7 @@ MANAGER_SIDECAR_IMAGE_PATCH := "\n - --enable-violation-export=true\ \n - mountPath: /tmp/violations\ \n name: tmp-violations\ \n - name: go-sub\ -\n image: fake-reader:latest\ +\n image: ${FAKE_READER_IMAGE}\ \n imagePullPolicy: Never\ \n securityContext:\ \n allowPrivilegeEscalation: false\ diff --git a/pkg/export/disk/diskwriter.go b/pkg/export/disk/disk.go similarity index 99% rename 
from pkg/export/disk/diskwriter.go rename to pkg/export/disk/disk.go index 75e938f86fd..f12e4359a88 100644 --- a/pkg/export/disk/diskwriter.go +++ b/pkg/export/disk/disk.go @@ -36,7 +36,7 @@ const ( ) const ( - Name = "diskwriter" + Name = "disk" ) var Connections = &Writer{ diff --git a/pkg/export/disk/diskwriter_test.go b/pkg/export/disk/disk_test.go similarity index 100% rename from pkg/export/disk/diskwriter_test.go rename to pkg/export/disk/disk_test.go diff --git a/test/export/fake-reader/export_config.yaml b/test/export/fake-reader/export_config.yaml index 622ebf91da6..a56339c805f 100644 --- a/test/export/fake-reader/export_config.yaml +++ b/test/export/fake-reader/export_config.yaml @@ -4,7 +4,7 @@ metadata: name: audit namespace: gatekeeper-system data: - driver: "diskwriter" + driver: "disk" config: | { "path": "/tmp/violations", diff --git a/test/export/fake-reader/main.go b/test/export/fake-reader/main.go index 27cbfadfdf8..470e3775259 100644 --- a/test/export/fake-reader/main.go +++ b/test/export/fake-reader/main.go @@ -76,7 +76,7 @@ func main() { // Process the read content for _, line := range lines { - log.Printf("Processed line: %s\n", line) + log.Printf("%s\n", line) } // Release the lock diff --git a/website/docs/export.md b/website/docs/export.md index bf5f434c7c5..b4ec65b269d 100644 --- a/website/docs/export.md +++ b/website/docs/export.md @@ -41,11 +41,13 @@ data: - `config` field is a json object that configures how the connection is made. E.g. which queue messages should be sent to. #### Available drivers -Dapr: https://dapr.io/ + +- Dapr: Export violations using pubsub model provided with [Dapr](https://dapr.io/) +- Disk: Export violations to file system. ### Quick start with exporting violations using Dapr and Redis -#### Prerequisites +#### Prerequisites for Dapr 1. Install Dapr @@ -130,10 +132,10 @@ Dapr: https://dapr.io/ ``` :::important - Please make sure `fake-subscriber` image is built and available in your cluster. 
Dockerfile to build image for `fake-subscriber` is under [gatekeeper/test/fake-subscriber](https://github.com/open-policy-agent/gatekeeper/tree/master/test/export/fake-subscriber). + Please make sure `fake-subscriber` image is built and available in your cluster. Dockerfile to build image for `fake-subscriber` is under [gatekeeper/test/export/fake-subscriber](https://github.com/open-policy-agent/gatekeeper/tree/master/test/export/fake-subscriber). ::: -#### Configure Gatekeeper with Export enabled +#### Configure Gatekeeper with Export enabled with Dapr 1. Create Gatekeeper namespace, and create Dapr pubsub component and Redis secret in Gatekeeper's namespace (`gatekeeper-system` by default). Please make sure to update `gatekeeper-system` namespace for the next steps if your cluster's Gatekeeper namespace is different. @@ -209,6 +211,121 @@ Dapr: https://dapr.io/ 2023/07/18 20:37:20 main.ExportMsg{ID:"2023-07-18T20:37:19Z", Details:map[string]interface {}{"missing_labels":[]interface {}{"test"}}, EventType:"violation_audited", Group:"constraints.gatekeeper.sh", Version:"v1beta1", Kind:"K8sRequiredLabels", Name:"pod-must-have-test", Namespace:"", Message:"you must provide labels: {\"test\"}", EnforcementAction:"deny", ConstraintAnnotations:map[string]string(nil), ResourceGroup:"", ResourceAPIVersion:"v1", ResourceKind:"Pod", ResourceNamespace:"nginx", ResourceName:"nginx-deployment-58899467f5-j85bs", ResourceLabels:map[string]string{"app":"nginx", "owner":"admin", "pod-template-hash":"58899467f5"}} ``` +### Quick start with exporting violations on node storage using Disk driver via emptyDir + +#### Prerequisites for Disk driver + +1. Build `fake-reader` image from [gatekeeper/test/export/fake-reader](https://github.com/open-policy-agent/gatekeeper/tree/master/test/export/fake-reader) + + ```bash + docker buildx build -t fake-reader:latest --load -f test/export/fake-reader/Dockerfile test/export/fake-reader + ``` + +2. Update `gatekeeper-audit` deployment to add `emptyDir` volume.
+ + ```yaml + volumes: + - emptyDir: {} + name: tmp-violations + ``` + + :::tip + You can replace emptyDir to use PVC or any other types of volumes. + ::: + +3. Update `gatekeeper-audit` deployment to add `volumeMount` to `manager` container. + + ```yaml + volumeMounts: + - mountPath: /tmp/violations + name: tmp-violations + ``` + +4. Update `gatekeeper-audit` deployment to add a `sidecar` reader container. + + ```yaml + volumeMounts: + - mountPath: /tmp/violations + name: tmp-violations + - name: go-sub + image: + imagePullPolicy: Never + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 999 + runAsNonRoot: true + runAsUser: 1000 + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /tmp/violations + name: tmp-violations + ``` + +#### Configure Gatekeeper with Export enabled to Disk + +1. Update `gatekeeper-audit` deployment to add following flags + + ```yaml + ... + - --enable-violation-export=true + - --audit-connection=audit + - --audit-channel=audit + ... + ``` + +2. Deploy Gatekeeper charts with aforementioned changes. + + :::tip + You can use below command that uses a rule defined in [Makefile](https://github.com/open-policy-agent/gatekeeper/blob/master/Makefile) to deploy gatekeeper that mounts emptyDir with sidecar reader container. + + + make deploy IMG=gatekeeper-e2e:latest IMG= EXPORT_BACKEND=disk FAKE_READER_IMAGE= + ::: + + **Note:** Verify that after the audit pod is running there is a Dapr sidecar injected and running along side `manager` container. + +3. Create connection config to establish a connection. 
+ + ```shell + kubectl apply -f - < Date: Fri, 14 Mar 2025 19:28:13 +0000 Subject: [PATCH 09/33] cleaning up disk driver Signed-off-by: Jaydip Gabani --- .github/workflows/disk-export.yaml | 2 +- pkg/audit/manager.go | 4 +-- pkg/export/disk/disk.go | 42 ++++++++++++++++-------------- pkg/export/util/util.go | 5 ++++ 4 files changed, 31 insertions(+), 22 deletions(-) diff --git a/.github/workflows/disk-export.yaml b/.github/workflows/disk-export.yaml index 7f7184d1d54..4473898cc9c 100644 --- a/.github/workflows/disk-export.yaml +++ b/.github/workflows/disk-export.yaml @@ -24,7 +24,7 @@ jobs: egress-policy: audit - name: Check out code into the Go module directory - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Bootstrap e2e run: | diff --git a/pkg/audit/manager.go b/pkg/audit/manager.go index 5ae628c4e6d..3b437972095 100644 --- a/pkg/audit/manager.go +++ b/pkg/audit/manager.go @@ -260,7 +260,7 @@ func (am *Manager) audit(ctx context.Context) error { am.log = log.WithValues(logging.AuditID, timestamp) logStart(am.log) exportErrorMap := make(map[string]error) - if err := am.exportSystem.Publish(context.Background(), *auditConnection, *auditChannel, exportutil.ExportMsg{Message: "audit is started", ID: timestamp}); err != nil { + if err := am.exportSystem.Publish(context.Background(), *auditConnection, *auditChannel, exportutil.ExportMsg{Message: exportutil.AuditStartedMsg, ID: timestamp}); err != nil { exportErrorMap[err.Error()] = err am.log.Error(err, "failed to export audit start message") } @@ -275,7 +275,7 @@ func (am *Manager) audit(ctx context.Context) error { if err := am.reporter.reportRunEnd(endTime); err != nil { am.log.Error(err, "failed to report run end time") } - if err := am.exportSystem.Publish(context.Background(), *auditConnection, *auditChannel, exportutil.ExportMsg{Message: "audit is completed", ID: timestamp}); err != nil { + if err := 
am.exportSystem.Publish(context.Background(), *auditConnection, *auditChannel, exportutil.ExportMsg{Message: exportutil.AuditCompletedMsg, ID: timestamp}); err != nil { exportErrorMap[err.Error()] = err } for _, v := range exportErrorMap { diff --git a/pkg/export/disk/disk.go b/pkg/export/disk/disk.go index f12e4359a88..dea6de47b4a 100644 --- a/pkg/export/disk/disk.go +++ b/pkg/export/disk/disk.go @@ -12,7 +12,9 @@ import ( "time" "github.com/open-policy-agent/gatekeeper/v3/pkg/export/util" + "github.com/open-policy-agent/gatekeeper/v3/pkg/logging" "k8s.io/client-go/util/retry" + logf "sigs.k8s.io/controller-runtime/pkg/log" ) type Connection struct { @@ -32,31 +34,31 @@ type Writer struct { } const ( + Name = "disk" maxAllowedAuditRuns = 5 -) - -const ( - Name = "disk" + maxAuditResults = "maxAuditResults" + violationPath = "path" ) var Connections = &Writer{ openConnections: make(map[string]Connection), } +var log = logf.Log.WithName("disk-driver").WithValues(logging.Process, "export") + func (r *Writer) CreateConnection(_ context.Context, connectionName string, config interface{}) error { cfg, ok := config.(map[string]interface{}) if !ok { return fmt.Errorf("invalid config format") } - path, pathOk := cfg["path"].(string) + path, pathOk := cfg[violationPath].(string) if !pathOk { - return fmt.Errorf("missing or invalid values in config for connection: %s", connectionName) + return fmt.Errorf("missing or invalid values in config for connection %s", connectionName) } - var err error - maxResults, maxResultsOk := cfg["maxAuditResults"].(float64) + maxResults, maxResultsOk := cfg[maxAuditResults].(float64) if !maxResultsOk { - return fmt.Errorf("missing or invalid 'maxAuditResults' for connection: %s", connectionName) + return fmt.Errorf("missing or invalid 'maxAuditResults' for connection %s", connectionName) } if maxResults > maxAllowedAuditRuns { return fmt.Errorf("maxAuditResults cannot be greater than %d", maxAllowedAuditRuns) @@ -66,7 +68,7 @@ func (r *Writer) 
CreateConnection(_ context.Context, connectionName string, conf Path: path, MaxAuditResults: int(maxResults), } - return err + return nil } func (r *Writer) UpdateConnection(_ context.Context, connectionName string, config interface{}) error { @@ -77,11 +79,11 @@ func (r *Writer) UpdateConnection(_ context.Context, connectionName string, conf conn, exists := r.openConnections[connectionName] if !exists { - return fmt.Errorf("connection not found: %s for Disk driver", connectionName) + return fmt.Errorf("connection %s for disk driver not found", connectionName) } var cleanUpErr error - if path, ok := cfg["path"].(string); ok { + if path, ok := cfg[violationPath].(string); ok { if conn.Path != path { if err := os.RemoveAll(conn.Path); err != nil { cleanUpErr = fmt.Errorf("connection updated but failed to remove content form old path: %w", err) @@ -89,16 +91,16 @@ func (r *Writer) UpdateConnection(_ context.Context, connectionName string, conf conn.Path = path } } else { - return fmt.Errorf("missing or invalid 'path' for connection: %s", connectionName) + return fmt.Errorf("missing or invalid 'path' for connection %s", connectionName) } - if maxResults, ok := cfg["maxAuditResults"].(float64); ok { + if maxResults, ok := cfg[maxAuditResults].(float64); ok { if maxResults > maxAllowedAuditRuns { return fmt.Errorf("maxAuditResults cannot be greater than %d", maxAllowedAuditRuns) } conn.MaxAuditResults = int(maxResults) } else { - return fmt.Errorf("missing or invalid 'maxAuditResults' for connection: %s", connectionName) + return fmt.Errorf("missing or invalid 'maxAuditResults' for connection %s", connectionName) } r.openConnections[connectionName] = conn @@ -108,7 +110,7 @@ func (r *Writer) UpdateConnection(_ context.Context, connectionName string, conf func (r *Writer) CloseConnection(connectionName string) error { conn, ok := r.openConnections[connectionName] if !ok { - return fmt.Errorf("connection not found: %s for disk driver", connectionName) + return 
fmt.Errorf("connection %s not found for disk driver", connectionName) } err := os.RemoveAll(conn.Path) delete(r.openConnections, connectionName) @@ -118,7 +120,7 @@ func (r *Writer) CloseConnection(connectionName string) error { func (r *Writer) Publish(_ context.Context, connectionName string, data interface{}, topic string) error { conn, ok := r.openConnections[connectionName] if !ok { - return fmt.Errorf("connection not found: %s for disk driver", connectionName) + return fmt.Errorf("connection %s not found for disk driver", connectionName) } var violation util.ExportMsg @@ -126,7 +128,7 @@ func (r *Writer) Publish(_ context.Context, connectionName string, data interfac return fmt.Errorf("invalid data type, cannot convert data to exportMsg") } - if violation.Message == "audit is started" { + if violation.Message == util.AuditStartedMsg { err := conn.handleAuditStart(violation.ID, topic) if err != nil { return fmt.Errorf("error handling audit start: %w", err) @@ -148,7 +150,7 @@ func (r *Writer) Publish(_ context.Context, connectionName string, data interfac return fmt.Errorf("error writing message to disk: %w", err) } - if violation.Message == "audit is completed" { + if violation.Message == util.AuditCompletedMsg { err := conn.handleAuditEnd(topic) if err != nil { return fmt.Errorf("error handling audit end: %w", err) @@ -161,6 +163,7 @@ func (r *Writer) Publish(_ context.Context, connectionName string, data interfac } func (conn *Connection) handleAuditStart(auditID string, topic string) error { + // Replace ':' with '_' to avoid issues with file names in windows conn.currentAuditRun = strings.ReplaceAll(auditID, ":", "_") // Ensure the directory exists @@ -197,6 +200,7 @@ func (conn *Connection) handleAuditEnd(topic string) error { if err := os.Rename(path.Join(conn.Path, topic, appendExtension(conn.currentAuditRun, "txt")), readyFilePath); err != nil { return fmt.Errorf("failed to rename file: %w, %s", err, conn.currentAuditRun) } + log.Info("File renamed", 
"filename", readyFilePath) return conn.cleanupOldAuditFiles(topic) } diff --git a/pkg/export/util/util.go b/pkg/export/util/util.go index b6a0f4fd784..c78488e5e68 100644 --- a/pkg/export/util/util.go +++ b/pkg/export/util/util.go @@ -42,3 +42,8 @@ const ( ErrWritingMessage ExportError = "error_writing_message" ErrCleaningUpAudit ExportError = "error_cleaning_up_audit" ) + +const ( + AuditStartedMsg = "audit is started" + AuditCompletedMsg = "audit is completed" +) From 08f0a709dc0b6dd86a3668367d8bb093226791b9 Mon Sep 17 00:00:00 2001 From: Jaydip Gabani Date: Mon, 17 Mar 2025 22:47:21 +0000 Subject: [PATCH 10/33] adding reader dockerfile to dependabot config Signed-off-by: Jaydip Gabani --- .github/dependabot.yml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 081d0136be9..d3478b73a6a 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -63,3 +63,17 @@ updates: interval: "weekly" commit-message: prefix: "chore" + + - package-ecosystem: "docker" + directory: "/test/export/fake-subscriber" + schedule: + interval: "weekly" + commit-message: + prefix: "chore" + + - package-ecosystem: "docker" + directory: "/test/export/fake-reader" + schedule: + interval: "weekly" + commit-message: + prefix: "chore" From e4bf31eba2dd40e5a25eb908c432616c6fceb74a Mon Sep 17 00:00:00 2001 From: Jaydip Gabani Date: Tue, 18 Mar 2025 19:19:55 +0000 Subject: [PATCH 11/33] updating dockerfile for fake-reader Signed-off-by: Jaydip Gabani --- test/export/fake-reader/Dockerfile | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/test/export/fake-reader/Dockerfile b/test/export/fake-reader/Dockerfile index bc7211fe69e..0e45c91a6cf 100644 --- a/test/export/fake-reader/Dockerfile +++ b/test/export/fake-reader/Dockerfile @@ -1,8 +1,4 @@ -ARG BUILDPLATFORM="linux/amd64" -ARG BUILDERIMAGE="golang:1.22-bookworm" -ARG BASEIMAGE="gcr.io/distroless/static-debian12:nonroot" - -FROM 
--platform=$BUILDPLATFORM $BUILDERIMAGE as builder +FROM --platform=$BUILDPLATFORM golang:1.23-bookworm@sha256:462f68e1109cc0415f58ba591f11e650b38e193fddc4a683a3b77d29be8bfb2c as builder ARG TARGETPLATFORM ARG TARGETOS @@ -24,7 +20,7 @@ RUN go mod init && go mod tidy && go mod vendor RUN go build -o main -FROM $BASEIMAGE +FROM gcr.io/distroless/static-debian12@sha256:8dd8d3ca2cf283383304fd45a5c9c74d5f2cd9da8d3b077d720e264880077c65 WORKDIR / From 2a52f446f222ac385f536f8f1ad340b2fd5b2633 Mon Sep 17 00:00:00 2001 From: Jaydip Gabani Date: Mon, 24 Mar 2025 22:07:18 +0000 Subject: [PATCH 12/33] addressing feedback and cleaning up code Signed-off-by: Jaydip Gabani --- .../export/export_config_controller.go | 2 +- pkg/export/disk/disk.go | 49 +++++++- pkg/export/disk/disk_test.go | 110 +++++++++--------- test/export/fake-reader/main.go | 39 ++----- website/docs/export.md | 4 +- 5 files changed, 114 insertions(+), 90 deletions(-) diff --git a/pkg/controller/export/export_config_controller.go b/pkg/controller/export/export_config_controller.go index 43c7cb155d5..4d7486a5a96 100644 --- a/pkg/controller/export/export_config_controller.go +++ b/pkg/controller/export/export_config_controller.go @@ -124,7 +124,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, request reconcile.Request) ( err = r.system.UpsertConnection(ctx, config, request.Name, cfg.Data["driver"]) if err != nil { - return reconcile.Result{}, err + return reconcile.Result{Requeue: true}, err } log.Info("Connection upsert successful", "name", request.Name, "driver", cfg.Data["driver"]) diff --git a/pkg/export/disk/disk.go b/pkg/export/disk/disk.go index dea6de47b4a..0a32e7316c3 100644 --- a/pkg/export/disk/disk.go +++ b/pkg/export/disk/disk.go @@ -56,6 +56,9 @@ func (r *Writer) CreateConnection(_ context.Context, connectionName string, conf if !pathOk { return fmt.Errorf("missing or invalid values in config for connection %s", connectionName) } + if err := validatePath(path); err != nil { + return 
fmt.Errorf("invalid path: %w", err) + } maxResults, maxResultsOk := cfg[maxAuditResults].(float64) if !maxResultsOk { return fmt.Errorf("missing or invalid 'maxAuditResults' for connection %s", connectionName) @@ -82,13 +85,21 @@ func (r *Writer) UpdateConnection(_ context.Context, connectionName string, conf return fmt.Errorf("connection %s for disk driver not found", connectionName) } - var cleanUpErr error if path, ok := cfg[violationPath].(string); ok { if conn.Path != path { + if err := validatePath(path); err != nil { + return fmt.Errorf("invalid path: %w", err) + } + if conn.File != nil { + if err := conn.unlockAndCloseFile(); err != nil { + return fmt.Errorf("connection update failed, error closing file: %w.", err) + } + } if err := os.RemoveAll(conn.Path); err != nil { - cleanUpErr = fmt.Errorf("connection updated but failed to remove content form old path: %w", err) + return fmt.Errorf("connection update failed, error deleting violations stored at old path: %w.", err) } conn.Path = path + conn.File = nil } } else { return fmt.Errorf("missing or invalid 'path' for connection %s", connectionName) @@ -104,7 +115,7 @@ func (r *Writer) UpdateConnection(_ context.Context, connectionName string, conf } r.openConnections[connectionName] = conn - return cleanUpErr + return nil } func (r *Writer) CloseConnection(connectionName string) error { @@ -112,8 +123,13 @@ func (r *Writer) CloseConnection(connectionName string) error { if !ok { return fmt.Errorf("connection %s not found for disk driver", connectionName) } - err := os.RemoveAll(conn.Path) delete(r.openConnections, connectionName) + if conn.File != nil { + if err := conn.unlockAndCloseFile(); err != nil { + return fmt.Errorf("connection is closed without removing respective violations. 
error closing file: %w", err) + } + } + err := os.RemoveAll(conn.Path) return err } @@ -142,7 +158,7 @@ func (r *Writer) Publish(_ context.Context, connectionName string, data interfac } if conn.File == nil { - return fmt.Errorf("no file to write the violation in") + return fmt.Errorf("failed to write violation: no file provided") } _, err = conn.File.WriteString(string(jsonData) + "\n") @@ -185,6 +201,7 @@ func (conn *Connection) handleAuditStart(auditID string, topic string) error { if err != nil { return fmt.Errorf("failed to acquire lock: %w", err) } + log.Info("Writing latest violations at ") return nil } @@ -273,3 +290,25 @@ func getEarliestFile(dirPath string) (string, []string, error) { func appendExtension(name string, ext string) string { return name + "." + ext } + +// validatePath checks if the provided path is valid and writable. +func validatePath(path string) error { + if path == "" { + return fmt.Errorf("path cannot be empty") + } + if strings.Contains(path, "..") { + return fmt.Errorf("path must not contain '..', dir traversal is not allowed") + } + // validate if the path is writable + if err := os.MkdirAll(path, 0o755); err != nil { + return fmt.Errorf("failed to create directory: %w", err) + } + info, err := os.Stat(path) + if err != nil { + return fmt.Errorf("failed to stat path: %w", err) + } + if !info.IsDir() { + return fmt.Errorf("path is not a directory") + } + return nil +} \ No newline at end of file diff --git a/pkg/export/disk/disk_test.go b/pkg/export/disk/disk_test.go index 0df72ce2740..6bc6c9027c5 100644 --- a/pkg/export/disk/disk_test.go +++ b/pkg/export/disk/disk_test.go @@ -18,7 +18,7 @@ func TestCreateConnection(t *testing.T) { writer := &Writer{ openConnections: make(map[string]Connection), } - + tmpPath := t.TempDir() tests := []struct { name string connectionName string @@ -29,7 +29,7 @@ func TestCreateConnection(t *testing.T) { name: "Valid config", connectionName: "conn1", config: map[string]interface{}{ - "path": 
"/tmp/audit", + "path": tmpPath, "maxAuditResults": 3.0, }, expectError: false, @@ -52,7 +52,7 @@ func TestCreateConnection(t *testing.T) { name: "Missing maxAuditResults", connectionName: "conn4", config: map[string]interface{}{ - "path": "/tmp/audit", + "path": tmpPath, }, expectError: true, }, @@ -60,7 +60,7 @@ func TestCreateConnection(t *testing.T) { name: "Exceeding maxAuditResults", connectionName: "conn4", config: map[string]interface{}{ - "path": "/tmp/audit", + "path": tmpPath, "maxAuditResults": 10.0, }, expectError: true, @@ -101,10 +101,9 @@ func TestUpdateConnection(t *testing.T) { writer := &Writer{ openConnections: make(map[string]Connection), } - - // Pre-create a connection to update + tmpPath := t.TempDir() writer.openConnections["conn1"] = Connection{ - Path: "/tmp/audit", + Path: tmpPath, MaxAuditResults: 3, } @@ -118,7 +117,7 @@ func TestUpdateConnection(t *testing.T) { name: "Valid update", connectionName: "conn1", config: map[string]interface{}{ - "path": "/tmp/audit_updated", + "path": t.TempDir(), "maxAuditResults": 4.0, }, expectError: false, @@ -133,7 +132,7 @@ func TestUpdateConnection(t *testing.T) { name: "Connection not found", connectionName: "conn2", config: map[string]interface{}{ - "path": "/tmp/audit", + "path": t.TempDir(), "maxAuditResults": 2.0, }, expectError: true, @@ -150,7 +149,7 @@ func TestUpdateConnection(t *testing.T) { name: "Missing maxAuditResults", connectionName: "conn1", config: map[string]interface{}{ - "path": "/tmp/audit", + "path": t.TempDir(), }, expectError: true, }, @@ -158,7 +157,7 @@ func TestUpdateConnection(t *testing.T) { name: "Exceeding maxAuditResults", connectionName: "conn1", config: map[string]interface{}{ - "path": "/tmp/audit", + "path": t.TempDir(), "maxAuditResults": 10.0, }, expectError: true, @@ -201,31 +200,62 @@ func TestCloseConnection(t *testing.T) { openConnections: make(map[string]Connection), } - // Pre-create a connection to close - writer.openConnections["conn1"] = Connection{ - 
Path: "/tmp/audit", - MaxAuditResults: 10, - } - tests := []struct { name string connectionName string + setup func() error expectError bool }{ { name: "Valid close", connectionName: "conn1", + setup: func() error { + // Pre-create a connection to close + writer.openConnections["conn1"] = Connection{ + Path: t.TempDir(), + MaxAuditResults: 10, + } + return nil + }, expectError: false, }, { name: "Connection not found", connectionName: "conn2", + setup: nil, expectError: true, }, + { + name: "Valid close with open and locked file", + connectionName: "conn3", + setup: func() error { + // Pre-create a connection to close + d := t.TempDir() + if err := os.MkdirAll(d, 0o755); err != nil { + return err + } + file, err := os.CreateTemp(d, "testfile") + if err != nil { + return err + } + writer.openConnections["conn3"] = Connection{ + Path: d, + MaxAuditResults: 10, + File: file, + } + return syscall.Flock(int(file.Fd()), syscall.LOCK_EX) + }, + expectError: false, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + if tt.setup != nil { + if err := tt.setup(); err != nil { + t.Fatalf("Setup failed: %v", err) + } + } err := writer.CloseConnection(tt.connectionName) if (err != nil) != tt.expectError { t.Errorf("CloseConnection() error = %v, expectError %v", err, tt.expectError) @@ -247,7 +277,7 @@ func TestPublish(t *testing.T) { // Pre-create a connection to publish to writer.openConnections["conn1"] = Connection{ - Path: "/tmp/audit", + Path: t.TempDir(), MaxAuditResults: 1, } @@ -380,11 +410,6 @@ func TestPublish(t *testing.T) { } }) } - - err := os.RemoveAll("/tmp/audit") - if err != nil { - t.Fatalf("Failed to clean up: %v", err) - } } func TestHandleAuditStart(t *testing.T) { @@ -398,7 +423,7 @@ func TestHandleAuditStart(t *testing.T) { { name: "Valid audit start", connection: Connection{ - Path: "/tmp/audit", + Path: t.TempDir(), }, auditID: "audit1", topic: "topic1", @@ -428,11 +453,6 @@ func TestHandleAuditStart(t *testing.T) { } }) } - - err := 
os.RemoveAll("/tmp/audit") - if err != nil { - t.Fatalf("Failed to clean up: %v", err) - } } func TestHandleAuditEnd(t *testing.T) { @@ -447,7 +467,7 @@ func TestHandleAuditEnd(t *testing.T) { { name: "Valid audit end", connection: Connection{ - Path: "/tmp/audit", + Path: t.TempDir(), currentAuditRun: "audit1", }, topic: "topic1", @@ -468,7 +488,7 @@ func TestHandleAuditEnd(t *testing.T) { { name: "Cleanup old audit files error", connection: Connection{ - Path: "/tmp/audit", + Path: t.TempDir(), currentAuditRun: "audit1", MaxAuditResults: 1, }, @@ -517,11 +537,6 @@ func TestHandleAuditEnd(t *testing.T) { } }) } - - err := os.RemoveAll("/tmp/audit") - if err != nil { - t.Fatalf("Failed to clean up: %v", err) - } } func listFiles(dir string) ([]string, error) { @@ -550,7 +565,7 @@ func TestUnlockAndCloseFile(t *testing.T) { { name: "Valid unlock and close", connection: Connection{ - Path: "/tmp/audit", + Path: t.TempDir(), }, setup: func(conn *Connection) error { if err := os.MkdirAll(conn.Path, 0o755); err != nil { @@ -568,7 +583,7 @@ func TestUnlockAndCloseFile(t *testing.T) { { name: "No file to close", connection: Connection{ - Path: "/tmp/audit", + Path: t.TempDir(), }, setup: nil, expectError: true, @@ -576,7 +591,7 @@ func TestUnlockAndCloseFile(t *testing.T) { { name: "Invalid file descriptor", connection: Connection{ - Path: "/tmp/audit", + Path: t.TempDir(), }, setup: func(conn *Connection) error { file, err := os.CreateTemp(conn.Path, "testfile") @@ -604,11 +619,6 @@ func TestUnlockAndCloseFile(t *testing.T) { } }) } - - err := os.RemoveAll("/tmp/audit") - if err != nil { - t.Fatalf("Failed to clean up: %v", err) - } } func TestCleanupOldAuditFiles(t *testing.T) { @@ -623,7 +633,7 @@ func TestCleanupOldAuditFiles(t *testing.T) { { name: "No files to clean up", connection: Connection{ - Path: "/tmp/audit", + Path: t.TempDir(), MaxAuditResults: 5, }, topic: "topic1", @@ -636,7 +646,7 @@ func TestCleanupOldAuditFiles(t *testing.T) { { name: "Files within 
limit", connection: Connection{ - Path: "/tmp/audit", + Path: t.TempDir(), MaxAuditResults: 5, }, topic: "topic1", @@ -658,7 +668,7 @@ func TestCleanupOldAuditFiles(t *testing.T) { { name: "Files exceeding limit", connection: Connection{ - Path: "/tmp/audit", + Path: t.TempDir(), MaxAuditResults: 2, }, topic: "topic1", @@ -680,7 +690,7 @@ func TestCleanupOldAuditFiles(t *testing.T) { { name: "Error getting earliest file", connection: Connection{ - Path: "/invalid/path", + Path: t.TempDir(), MaxAuditResults: 2, }, topic: "topic1", @@ -713,10 +723,6 @@ func TestCleanupOldAuditFiles(t *testing.T) { } }) } - err := os.RemoveAll("/tmp/audit") - if err != nil { - t.Fatalf("Failed to clean up: %v", err) - } } func TestGetEarliestFile(t *testing.T) { diff --git a/test/export/fake-reader/main.go b/test/export/fake-reader/main.go index 470e3775259..529635b2040 100644 --- a/test/export/fake-reader/main.go +++ b/test/export/fake-reader/main.go @@ -11,35 +11,17 @@ import ( "time" ) -type PubsubMsg struct { - ID string `json:"id,omitempty"` - Details interface{} `json:"details,omitempty"` - EventType string `json:"eventType,omitempty"` - Group string `json:"group,omitempty"` - Version string `json:"version,omitempty"` - Kind string `json:"kind,omitempty"` - Name string `json:"name,omitempty"` - Namespace string `json:"namespace,omitempty"` - Message string `json:"message,omitempty"` - EnforcementAction string `json:"enforcementAction,omitempty"` - ConstraintAnnotations map[string]string `json:"constraintAnnotations,omitempty"` - ResourceGroup string `json:"resourceGroup,omitempty"` - ResourceAPIVersion string `json:"resourceAPIVersion,omitempty"` - ResourceKind string `json:"resourceKind,omitempty"` - ResourceNamespace string `json:"resourceNamespace,omitempty"` - ResourceName string `json:"resourceName,omitempty"` - ResourceLabels map[string]string `json:"resourceLabels,omitempty"` -} - -// Modifications for acturate simulation -// varify if violation exists for a constraint 
owned by policy and then post it - add sleep (2s) for a batch size of 2k violations -// hold 2k violations in variable - read from tmp-violations.txt -// hold tmp file for previous violations -// 2 files -// 1 - GK publish violations -// 1 - policy read violations. func main() { dirPath := "/tmp/violations/" + info, err := os.Stat(dirPath) + if err != nil { + log.Fatalf("failed to stat path: %v", err) + os.Exit(1) + } + if !info.IsDir() { + log.Fatalf("path is not a directory") + os.Exit(1) + } for { // Find the latest created file in dirPath @@ -50,8 +32,7 @@ func main() { time.Sleep(5 * time.Second) continue } - // Open the file in read-write mode - file, err := os.OpenFile(latestFile, os.O_RDWR, 0o644) + file, err := os.OpenFile(latestFile, os.O_RDONLY, 0o644) if err != nil { log.Printf("Error opening file: %v\n", err) time.Sleep(5 * time.Second) diff --git a/website/docs/export.md b/website/docs/export.md index b4ec65b269d..887365279f0 100644 --- a/website/docs/export.md +++ b/website/docs/export.md @@ -283,8 +283,7 @@ data: :::tip You can use below command that uses a rule defined in [Makefile](https://github.com/open-policy-agent/gatekeeper/blob/master/Makefile) to deploy gatekeeper that mounts emptyDir with sidecar reader container. - - make deploy IMG=gatekeeper-e2e:latest IMG= EXPORT_BACKEND=disk FAKE_READER_IMAGE= + make deploy IMG= EXPORT_BACKEND=disk FAKE_READER_IMAGE= ::: **Note:** Verify that after the audit pod is running there is a Dapr sidecar injected and running along side `manager` container. 
@@ -305,7 +304,6 @@ data: "path": "/tmp/violations", "maxAuditResults": 3 } - EOF ``` From 47508795a8f1671598d638c40376e0282398dc8c Mon Sep 17 00:00:00 2001 From: Jaydip Gabani Date: Tue, 25 Mar 2025 00:16:26 +0000 Subject: [PATCH 13/33] fixing lint Signed-off-by: Jaydip Gabani --- pkg/export/disk/disk.go | 8 +-- pkg/export/disk/disk_test.go | 89 ++++++++++++++++++++++++--------- test/export/fake-reader/main.go | 19 ++++--- 3 files changed, 78 insertions(+), 38 deletions(-) diff --git a/pkg/export/disk/disk.go b/pkg/export/disk/disk.go index 0a32e7316c3..058f6cc22d6 100644 --- a/pkg/export/disk/disk.go +++ b/pkg/export/disk/disk.go @@ -92,11 +92,11 @@ func (r *Writer) UpdateConnection(_ context.Context, connectionName string, conf } if conn.File != nil { if err := conn.unlockAndCloseFile(); err != nil { - return fmt.Errorf("connection update failed, error closing file: %w.", err) + return fmt.Errorf("connection update failed, error closing file: %w", err) } } if err := os.RemoveAll(conn.Path); err != nil { - return fmt.Errorf("connection update failed, error deleting violations stored at old path: %w.", err) + return fmt.Errorf("connection update failed, error deleting violations stored at old path: %w", err) } conn.Path = path conn.File = nil @@ -297,7 +297,7 @@ func validatePath(path string) error { return fmt.Errorf("path cannot be empty") } if strings.Contains(path, "..") { - return fmt.Errorf("path must not contain '..', dir traversal is not allowed") + return fmt.Errorf("path must not contain '..', dir traversal is not allowed") } // validate if the path is writable if err := os.MkdirAll(path, 0o755); err != nil { @@ -311,4 +311,4 @@ func validatePath(path string) error { return fmt.Errorf("path is not a directory") } return nil -} \ No newline at end of file +} diff --git a/pkg/export/disk/disk_test.go b/pkg/export/disk/disk_test.go index 6bc6c9027c5..bb205e00d9f 100644 --- a/pkg/export/disk/disk_test.go +++ b/pkg/export/disk/disk_test.go @@ -23,6 +23,7 @@ 
func TestCreateConnection(t *testing.T) { name string connectionName string config interface{} + err error expectError bool }{ { @@ -37,8 +38,11 @@ func TestCreateConnection(t *testing.T) { { name: "Invalid config format", connectionName: "conn2", - config: "invalid config", - expectError: true, + config: map[int]interface{}{ + 1: "test", + }, + err: fmt.Errorf("invalid config format"), + expectError: true, }, { name: "Missing path", @@ -46,6 +50,7 @@ func TestCreateConnection(t *testing.T) { config: map[string]interface{}{ "maxAuditResults": 10.0, }, + err: fmt.Errorf("missing or invalid 'path' for connection conn3"), expectError: true, }, { @@ -54,6 +59,7 @@ func TestCreateConnection(t *testing.T) { config: map[string]interface{}{ "path": tmpPath, }, + err: fmt.Errorf("missing or invalid 'maxAuditResults' for connection conn4"), expectError: true, }, { @@ -63,6 +69,7 @@ func TestCreateConnection(t *testing.T) { "path": tmpPath, "maxAuditResults": 10.0, }, + err: fmt.Errorf("maxAuditResults cannot be greater than 5"), expectError: true, }, } @@ -70,7 +77,7 @@ func TestCreateConnection(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { err := writer.CreateConnection(context.Background(), tt.connectionName, tt.config) - if (err != nil) != tt.expectError { + if tt.expectError && tt.err.Error() != err.Error() { t.Errorf("CreateConnection() error = %v, expectError %v", err, tt.expectError) } if !tt.expectError { @@ -80,14 +87,21 @@ func TestCreateConnection(t *testing.T) { } path, pathOk := tt.config.(map[string]interface{})["path"].(string) if !pathOk { - t.Fatalf("Failed to get path from config") + t.Errorf("Failed to get path from config") } if conn.Path != path { t.Errorf("Expected path %s, got %s", path, conn.Path) } + info, err := os.Stat(path) + if err != nil { + t.Errorf("failed to stat path: %s", err.Error()) + } + if !info.IsDir() { + t.Errorf("path is not a directory") + } maxAuditResults, maxResultsOk := 
tt.config.(map[string]interface{})["maxAuditResults"].(float64) if !maxResultsOk { - t.Fatalf("Failed to get maxAuditResults from config") + t.Errorf("Failed to get maxAuditResults from config") } if conn.MaxAuditResults != int(maxAuditResults) { t.Errorf("Expected maxAuditResults %d, got %d", int(maxAuditResults), conn.MaxAuditResults) @@ -102,9 +116,20 @@ func TestUpdateConnection(t *testing.T) { openConnections: make(map[string]Connection), } tmpPath := t.TempDir() + file, err := os.CreateTemp(tmpPath, "testfile") + if err != nil { + t.Errorf("Failed to create temp file: %v", err) + } + + err = syscall.Flock(int(file.Fd()), syscall.LOCK_EX) + if err != nil { + t.Errorf("Failed to lock file: %v", err) + } + writer.openConnections["conn1"] = Connection{ Path: tmpPath, MaxAuditResults: 3, + File: file, } tests := []struct { @@ -112,6 +137,7 @@ func TestUpdateConnection(t *testing.T) { connectionName string config interface{} expectError bool + err error }{ { name: "Valid update", @@ -121,12 +147,16 @@ func TestUpdateConnection(t *testing.T) { "maxAuditResults": 4.0, }, expectError: false, + err: nil, }, { name: "Invalid config format", connectionName: "conn1", - config: "invalid config", - expectError: true, + config: map[int]interface{}{ + 1: "test", + }, + expectError: true, + err: fmt.Errorf("invalid config format"), }, { name: "Connection not found", @@ -136,6 +166,7 @@ func TestUpdateConnection(t *testing.T) { "maxAuditResults": 2.0, }, expectError: true, + err: fmt.Errorf("connection conn2 for disk driver not found"), }, { name: "Missing path", @@ -144,6 +175,7 @@ func TestUpdateConnection(t *testing.T) { "maxAuditResults": 2.0, }, expectError: true, + err: fmt.Errorf("missing or invalid 'path' for connection conn1"), }, { name: "Missing maxAuditResults", @@ -152,6 +184,7 @@ func TestUpdateConnection(t *testing.T) { "path": t.TempDir(), }, expectError: true, + err: fmt.Errorf("missing or invalid 'maxAuditResults' for connection conn1"), }, { name: "Exceeding 
maxAuditResults", @@ -161,13 +194,14 @@ func TestUpdateConnection(t *testing.T) { "maxAuditResults": 10.0, }, expectError: true, + err: fmt.Errorf("maxAuditResults cannot be greater than 5"), }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { err := writer.UpdateConnection(context.Background(), tt.connectionName, tt.config) - if (err != nil) != tt.expectError { + if tt.expectError && tt.err.Error() != err.Error() { t.Errorf("UpdateConnection() error = %v, expectError %v", err, tt.expectError) } if !tt.expectError { @@ -177,14 +211,21 @@ func TestUpdateConnection(t *testing.T) { } path, pathOk := tt.config.(map[string]interface{})["path"].(string) if !pathOk { - t.Fatalf("Failed to get path from config") + t.Errorf("Failed to get path from config") } if conn.Path != path { t.Errorf("Expected path %s, got %s", path, conn.Path) } + info, err := os.Stat(path) + if err != nil { + t.Errorf("failed to stat path: %s", err.Error()) + } + if !info.IsDir() { + t.Errorf("path is not a directory") + } maxAuditResults, maxResultsOk := tt.config.(map[string]interface{})["maxAuditResults"].(float64) if !maxResultsOk { - t.Fatalf("Failed to get maxAuditResults from config") + t.Errorf("Failed to get maxAuditResults from config") } if conn.MaxAuditResults != int(maxAuditResults) { t.Errorf("Expected maxAuditResults %d, got %d", int(maxAuditResults), conn.MaxAuditResults) @@ -217,12 +258,12 @@ func TestCloseConnection(t *testing.T) { } return nil }, - expectError: false, + expectError: false, }, { name: "Connection not found", connectionName: "conn2", - setup: nil, + setup: nil, expectError: true, }, { @@ -241,11 +282,11 @@ func TestCloseConnection(t *testing.T) { writer.openConnections["conn3"] = Connection{ Path: d, MaxAuditResults: 10, - File: file, + File: file, } return syscall.Flock(int(file.Fd()), syscall.LOCK_EX) }, - expectError: false, + expectError: false, }, } @@ -253,7 +294,7 @@ func TestCloseConnection(t *testing.T) { t.Run(tt.name, func(t *testing.T) { 
if tt.setup != nil { if err := tt.setup(); err != nil { - t.Fatalf("Setup failed: %v", err) + t.Errorf("Setup failed: %v", err) } } err := writer.CloseConnection(tt.connectionName) @@ -376,11 +417,11 @@ func TestPublish(t *testing.T) { if !tt.expectError { files, err := listFiles(path.Join(writer.openConnections[tt.connectionName].Path, tt.topic)) if err != nil { - t.Fatalf("Failed to list files: %v", err) + t.Errorf("Failed to list files: %v", err) } msg, ok := tt.data.(util.ExportMsg) if !ok { - t.Fatalf("Failed to convert data to ExportMsg") + t.Errorf("Failed to convert data to ExportMsg") } if msg.Message == "audit is started" { if len(files) > 2 { @@ -399,7 +440,7 @@ func TestPublish(t *testing.T) { } content, err := os.ReadFile(files[0]) if err != nil { - t.Fatalf("Failed to read file: %v", err) + t.Errorf("Failed to read file: %v", err) } for _, msg := range []string{"audit is started", "audit is in progress", "audit is completed"} { if !strings.Contains(string(content), msg) { @@ -518,7 +559,7 @@ func TestHandleAuditEnd(t *testing.T) { t.Run(tt.name, func(t *testing.T) { if tt.setup != nil { if err := tt.setup(&tt.connection); err != nil { - t.Fatalf("Setup failed: %v", err) + t.Errorf("Setup failed: %v", err) } } err := tt.connection.handleAuditEnd(tt.topic) @@ -529,7 +570,7 @@ func TestHandleAuditEnd(t *testing.T) { if !tt.expectError { files, err := listFiles(path.Join(tt.connection.Path, tt.topic)) if err != nil { - t.Fatalf("Failed to list files: %v", err) + t.Errorf("Failed to list files: %v", err) } if slices.Contains(files, tt.expectedFile) { t.Errorf("Expected file %s to exist, but it does not. 
Files: %v", tt.expectedFile, files) @@ -610,7 +651,7 @@ func TestUnlockAndCloseFile(t *testing.T) { t.Run(tt.name, func(t *testing.T) { if tt.setup != nil { if err := tt.setup(&tt.connection); err != nil { - t.Fatalf("Setup failed: %v", err) + t.Errorf("Setup failed: %v", err) } } err := tt.connection.unlockAndCloseFile() @@ -704,7 +745,7 @@ func TestCleanupOldAuditFiles(t *testing.T) { t.Run(tt.name, func(t *testing.T) { if tt.setup != nil { if err := tt.setup(&tt.connection); err != nil { - t.Fatalf("Setup failed: %v", err) + t.Errorf("Setup failed: %v", err) } } err := tt.connection.cleanupOldAuditFiles(tt.topic) @@ -715,7 +756,7 @@ func TestCleanupOldAuditFiles(t *testing.T) { dir := path.Join(tt.connection.Path, tt.topic) files, err := os.ReadDir(dir) if err != nil { - t.Fatalf("Failed to read directory: %v", err) + t.Errorf("Failed to read directory: %v", err) } if len(files) != tt.expectedFiles { t.Errorf("Expected %d files, got %d", tt.expectedFiles, len(files)) @@ -798,7 +839,7 @@ func TestGetEarliestFile(t *testing.T) { dir := t.TempDir() if tt.setup != nil { if err := tt.setup(dir); err != nil { - t.Fatalf("Setup failed: %v", err) + t.Errorf("Setup failed: %v", err) } } earliestFile, files, err := getEarliestFile(dir) diff --git a/test/export/fake-reader/main.go b/test/export/fake-reader/main.go index 529635b2040..56416ad687c 100644 --- a/test/export/fake-reader/main.go +++ b/test/export/fake-reader/main.go @@ -16,32 +16,31 @@ func main() { info, err := os.Stat(dirPath) if err != nil { log.Fatalf("failed to stat path: %v", err) - os.Exit(1) } if !info.IsDir() { log.Fatalf("path is not a directory") - os.Exit(1) } for { // Find the latest created file in dirPath latestFile, files, err := getLatestFile(dirPath) - log.Printf("out of all files: %v, reading from just %s \n", files, latestFile) + log.Println("available files", files) + log.Println("reading from", latestFile) if err != nil { - log.Printf("Error finding latest file: %v\n", err) + 
log.Println("Error finding latest file", err) time.Sleep(5 * time.Second) continue } file, err := os.OpenFile(latestFile, os.O_RDONLY, 0o644) if err != nil { - log.Printf("Error opening file: %v\n", err) + log.Println("Error opening file", err) time.Sleep(5 * time.Second) continue } // Acquire an exclusive lock on the file if err := syscall.Flock(int(file.Fd()), syscall.LOCK_EX); err != nil { - log.Fatalf("Error locking file: %v\n", err) + log.Fatalln("Error locking file", err) } // Read the file content @@ -52,22 +51,22 @@ func main() { } if err := scanner.Err(); err != nil { - log.Fatalf("Error reading file: %v\n", err) + log.Fatalln("Error reading file", err) } // Process the read content for _, line := range lines { - log.Printf("%s\n", line) + log.Println(line) } // Release the lock if err := syscall.Flock(int(file.Fd()), syscall.LOCK_UN); err != nil { - log.Fatalf("Error unlocking file: %v\n", err) + log.Fatalln("Error unlocking file", err) } // Close the file if err := file.Close(); err != nil { - log.Fatalf("Error closing file: %v\n", err) + log.Fatalln("Error closing file", err) } time.Sleep(90 * time.Second) } From b56f99d4f63897d32e6e9815c465a08e052a20da Mon Sep 17 00:00:00 2001 From: Jaydip Gabani Date: Tue, 25 Mar 2025 00:25:17 +0000 Subject: [PATCH 14/33] fixing error msg Signed-off-by: Jaydip Gabani --- pkg/export/disk/disk.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/export/disk/disk.go b/pkg/export/disk/disk.go index 058f6cc22d6..82273b6960a 100644 --- a/pkg/export/disk/disk.go +++ b/pkg/export/disk/disk.go @@ -54,7 +54,7 @@ func (r *Writer) CreateConnection(_ context.Context, connectionName string, conf path, pathOk := cfg[violationPath].(string) if !pathOk { - return fmt.Errorf("missing or invalid values in config for connection %s", connectionName) + return fmt.Errorf("missing or invalid 'path' for connection %s", connectionName) } if err := validatePath(path); err != nil { return fmt.Errorf("invalid path: %w", err) 
From ab3412f7091f8b3e7cc9383401117ba51116ba89 Mon Sep 17 00:00:00 2001 From: Jaydip Gabani Date: Tue, 25 Mar 2025 01:11:26 +0000 Subject: [PATCH 15/33] splitting via error type to make sure types of errors stored are deterministic Signed-off-by: Jaydip Gabani --- pkg/audit/manager.go | 6 +++--- pkg/export/disk/disk.go | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/pkg/audit/manager.go b/pkg/audit/manager.go index 3b437972095..72ff376e6f6 100644 --- a/pkg/audit/manager.go +++ b/pkg/audit/manager.go @@ -261,7 +261,7 @@ func (am *Manager) audit(ctx context.Context) error { logStart(am.log) exportErrorMap := make(map[string]error) if err := am.exportSystem.Publish(context.Background(), *auditConnection, *auditChannel, exportutil.ExportMsg{Message: exportutil.AuditStartedMsg, ID: timestamp}); err != nil { - exportErrorMap[err.Error()] = err + exportErrorMap[strings.Split(err.Error(), ":")[0]] = err am.log.Error(err, "failed to export audit start message") } // record audit latency @@ -276,7 +276,7 @@ func (am *Manager) audit(ctx context.Context) error { am.log.Error(err, "failed to report run end time") } if err := am.exportSystem.Publish(context.Background(), *auditConnection, *auditChannel, exportutil.ExportMsg{Message: exportutil.AuditCompletedMsg, ID: timestamp}); err != nil { - exportErrorMap[err.Error()] = err + exportErrorMap[strings.Split(err.Error(), ":")[0]] = err } for _, v := range exportErrorMap { am.log.Error(v, "failed to export audit violation") @@ -898,7 +898,7 @@ func (am *Manager) addAuditResponsesToUpdateLists( if *exportController.ExportEnabled { err := am.exportSystem.Publish(context.Background(), *auditConnection, *auditChannel, violationMsg(constraint, ea, r.ScopedEnforcementActions, gvk, namespace, name, msg, details, labels, timestamp)) if err != nil { - exportErrorMap[err.Error()] = err + exportErrorMap[strings.Split(err.Error(), ":")[0]] = err } } if *emitAuditEvents { diff --git a/pkg/export/disk/disk.go 
b/pkg/export/disk/disk.go index 82273b6960a..2b8e9301eb6 100644 --- a/pkg/export/disk/disk.go +++ b/pkg/export/disk/disk.go @@ -136,12 +136,12 @@ func (r *Writer) CloseConnection(connectionName string) error { func (r *Writer) Publish(_ context.Context, connectionName string, data interface{}, topic string) error { conn, ok := r.openConnections[connectionName] if !ok { - return fmt.Errorf("connection %s not found for disk driver", connectionName) + return fmt.Errorf("invalid connection: %s not found for disk driver", connectionName) } var violation util.ExportMsg if violation, ok = data.(util.ExportMsg); !ok { - return fmt.Errorf("invalid data type, cannot convert data to exportMsg") + return fmt.Errorf("invalid data type: cannot convert data to exportMsg") } if violation.Message == util.AuditStartedMsg { From 5754afef95f0b036cc24080bfd3fea7951d1caed Mon Sep 17 00:00:00 2001 From: Jaydip Gabani Date: Tue, 25 Mar 2025 01:26:37 +0000 Subject: [PATCH 16/33] updating docs Signed-off-by: Jaydip Gabani --- website/docs/export.md | 3 --- 1 file changed, 3 deletions(-) diff --git a/website/docs/export.md b/website/docs/export.md index 887365279f0..47eb83cf901 100644 --- a/website/docs/export.md +++ b/website/docs/export.md @@ -244,9 +244,6 @@ data: 4. Update `gatekeeper-audit` deployment to add a `sidecar` reader container. 
```yaml - volumeMounts: - - mountPath: /tmp/violations - name: tmp-violations - name: go-sub image: imagePullPolicy: Never From 185eb5b80ca5bd26a4ecd409a977fd39d20973f3 Mon Sep 17 00:00:00 2001 From: Jaydip Gabani Date: Tue, 25 Mar 2025 01:51:59 +0000 Subject: [PATCH 17/33] updating docs Signed-off-by: Jaydip Gabani --- website/docs/export.md | 122 +++++++++++++++++++++-------------------- 1 file changed, 62 insertions(+), 60 deletions(-) diff --git a/website/docs/export.md b/website/docs/export.md index 47eb83cf901..5d235f45750 100644 --- a/website/docs/export.md +++ b/website/docs/export.md @@ -213,7 +213,7 @@ data: ### Quick start with exporting violations on node storage using Disk driver via emptyDir -#### Prerequisites for Disk driver +#### Configure Gatekeeper with Export enabled to Disk 1. Build `fake-reader` image from [gatekeeper/test/export/fake-reader](https://github.com/open-policy-agent/gatekeeper/tree/master/test/export/fake-reader) @@ -221,69 +221,71 @@ data: docker buildx build -t --load -f test/export/fake-reader/Dockerfile test/export/fake-reader ``` -2. Update `gatekeeper-audit` deployment to add `emptyDir` volume. - - ```yaml - volumes: - - emptyDir: {} - name: tmp-violations - ``` - - :::tip - You can replace emptyDir to use PVC or any other types of volumes. - ::: - -3. Update `gatekeeper-audit` deployment to add `volumeMount` to `manager` container. + **Note:** Make sure the fake-reader image is available in your preferred registry or cluster. - ```yaml - volumeMounts: - - mountPath: /tmp/violations - name: tmp-violations - ``` - -4. Update `gatekeeper-audit` deployment to add a `sidecar` reader container. 
- - ```yaml - - name: go-sub - image: - imagePullPolicy: Never - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 999 - runAsNonRoot: true - runAsUser: 1000 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /tmp/violations - name: tmp-violations - ``` +2. Deploy Gatekeeper charts with needed configuration. -#### Configure Gatekeeper with Export enabled to Disk - -1. Update `gatekeeper-audit` deployment to add following flags - - ```yaml - ... - - --enable-violation-export=true - - --audit-connection=audit - - --audit-channel=audit - ... - ``` - -2. Deploy Gatekeeper charts with aforementioned changes. - - :::tip You can use below command that uses a rule defined in [Makefile](https://github.com/open-policy-agent/gatekeeper/blob/master/Makefile) to deploy gatekeeper that mounts emptyDir with sidecar reader container. + + ```bash + make deploy IMG= EXPORT_BACKEND=disk FAKE_READER_IMAGE= FAKE_READER_IMAGE_PULL_POLICY= + ``` - make deploy IMG= EXPORT_BACKEND=disk FAKE_READER_IMAGE= - ::: - - **Note:** Verify that after the audit pod is running there is a Dapr sidecar injected and running along side `manager` container. + Alternatively, you can follow the below steps to manually update Gatekeeper and configure export. + + 1. Update `gatekeeper-audit` deployment to add `emptyDir` volume. + + ```yaml + volumes: + - emptyDir: {} + name: tmp-violations + ``` + + :::tip + You can replace emptyDir to use PVC or any other types of volumes. + ::: + + 2. Update `gatekeeper-audit` deployment to add `volumeMount` to `manager` container. + + ```yaml + volumeMounts: + - mountPath: /tmp/violations + name: tmp-violations + ``` + + 3. Update `gatekeeper-audit` deployment to add a `sidecar` reader container. 
+ + ```yaml + - name: go-sub + image: + imagePullPolicy: + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 999 + runAsNonRoot: true + runAsUser: 1000 + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /tmp/violations + name: tmp-violations + ``` + + 4. Update `gatekeeper-audit` deployment to add following flags + + ```yaml + ... + - --enable-violation-export=true + - --audit-connection=audit + - --audit-channel=audit + ... + ``` + + **Note:** Verify that after the audit pod is running there is a sidecar running along side `manager` container after deploying Gatekeeper. 3. Create connection config to establish a connection. From 80409b908b259035673966d4e7f427016836ab10 Mon Sep 17 00:00:00 2001 From: Jaydip Gabani Date: Tue, 25 Mar 2025 04:25:21 +0000 Subject: [PATCH 18/33] adding sidecar to helmcharts Signed-off-by: Jaydip Gabani --- .github/workflows/dapr-export.yaml | 2 +- Makefile | 38 ++++++- cmd/build/helmify/static/README.md | 5 +- manifest_staging/charts/gatekeeper/README.md | 5 +- website/docs/export.md | 114 +++++++++---------- 5 files changed, 94 insertions(+), 70 deletions(-) diff --git a/.github/workflows/dapr-export.yaml b/.github/workflows/dapr-export.yaml index 67a3a1e4e5b..483157edd9b 100644 --- a/.github/workflows/dapr-export.yaml +++ b/.github/workflows/dapr-export.yaml @@ -50,7 +50,7 @@ jobs: kind load docker-image --name kind gatekeeper-e2e:latest gatekeeper-crds:latest kubectl create ns gatekeeper-system make e2e-publisher-deploy - make e2e-helm-deploy HELM_REPO=gatekeeper-e2e HELM_CRD_REPO=gatekeeper-crds HELM_RELEASE=latest ENABLE_EXPORT=true LOG_LEVEL=DEBUG + make e2e-helm-deploy HELM_REPO=gatekeeper-e2e HELM_CRD_REPO=gatekeeper-crds HELM_RELEASE=latest ENABLE_EXPORT=true LOG_LEVEL=DEBUG EXPORT_BACKEND=dapr make test-e2e ENABLE_EXPORT_TESTS=1 - name: Save logs diff --git a/Makefile b/Makefile index c3fd45ba315..a0cc41b8fbb 100644 --- 
a/Makefile +++ b/Makefile @@ -44,6 +44,7 @@ HELM_DAPR_ARGS := --set-string auditPodAnnotations.dapr\\.io/enabled=true \ HELM_EXPORT_ARGS := --set enableViolationExport=${ENABLE_EXPORT} \ --set audit.connection=${AUDIT_CONNECTION} \ --set audit.channel=${AUDIT_CHANNEL} \ + -f /tmp/values.yaml \ HELM_EXTRA_ARGS := --set image.repository=${HELM_REPO} \ --set image.crdRepository=${HELM_CRD_REPO} \ @@ -75,6 +76,7 @@ GOLANGCI_LINT_CACHE := $(shell pwd)/.tmp/golangci-lint BENCHMARK_FILE_NAME ?= benchmarks.txt FAKE_SUBSCRIBER_IMAGE ?= fake-subscriber:latest FAKE_READER_IMAGE ?= fake-reader:latest +FAKE_READER_IMAGE_PULL_POLICY ?= IfNotPresent ROOT_DIR := $(shell dirname $(realpath $(firstword $(MAKEFILE_LIST)))) BIN_DIR := $(abspath $(ROOT_DIR)/bin) @@ -141,7 +143,7 @@ MANAGER_SIDECAR_IMAGE_PATCH := "\n - --enable-violation-export=true\ \n name: tmp-violations\ \n - name: go-sub\ \n image: ${FAKE_READER_IMAGE}\ -\n imagePullPolicy: Never\ +\n imagePullPolicy: ${FAKE_READER_IMAGE_PULL_POLICY}\ \n securityContext:\ \n allowPrivilegeEscalation: false\ \n capabilities:\ @@ -161,6 +163,31 @@ MANAGER_SIDECAR_IMAGE_PATCH := "\n - --enable-violation-export=true\ \n name: tmp-violations\ \n" +HELM_EXPORT_VARIABLES := "audit:\ +\n exportVolume:\ +\n name: tmp-violations\ +\n emptyDir: {}\ +\n exportVolumeMount:\ +\n path: /tmp/violations\ +\n exportSidecar:\ +\n name: go-sub\ +\n image: ${FAKE_READER_IMAGE}\ +\n imagePullPolicy: ${FAKE_READER_IMAGE_PULL_POLICY}\ +\n securityContext:\ +\n allowPrivilegeEscalation: false\ +\n capabilities:\ +\n drop:\ +\n - ALL\ +\n readOnlyRootFilesystem: true\ +\n runAsGroup: 999\ +\n runAsNonRoot: true\ +\n runAsUser: 1000\ +\n seccompProfile:\ +\n type: RuntimeDefault\ +\n volumeMounts:\ +\n - mountPath: /tmp/violations\ +\n name: tmp-violations" + # Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) ifeq (,$(shell go env GOBIN)) GOBIN=$(shell go env GOPATH)/bin @@ -257,13 +284,13 @@ e2e-helm-install: cd .staging/helm 
&& tar -xvf helmbin.tar.gz ./.staging/helm/linux-amd64/helm version --client -e2e-helm-deploy: e2e-helm-install +e2e-helm-deploy: e2e-helm-install $(LOCALBIN) create-values ifeq ($(ENABLE_EXPORT),true) ./.staging/helm/linux-amd64/helm install manifest_staging/charts/gatekeeper --name-template=gatekeeper \ --namespace ${GATEKEEPER_NAMESPACE} \ --debug --wait \ - $(HELM_EXPORT_ARGS) \ - ${HELM_DAPR_ARGS} \ + $(if $(filter disk,$(EXPORT_BACKEND)),$(HELM_EXPORT_ARGS)) \ + $(if $(filter dapr,$(EXPORT_BACKEND)),$(HELM_DAPR_ARGS)) \ $(HELM_EXTRA_ARGS) else ./.staging/helm/linux-amd64/helm install manifest_staging/charts/gatekeeper --name-template=gatekeeper \ @@ -601,3 +628,6 @@ tilt: generate manifests tilt-prepare tilt-clean: rm -rf .tiltbuild + +create-values: + @echo ${HELM_EXPORT_VARIABLES} > /tmp/values.yaml \ No newline at end of file diff --git a/cmd/build/helmify/static/README.md b/cmd/build/helmify/static/README.md index 44681b7f891..a26505991fa 100644 --- a/cmd/build/helmify/static/README.md +++ b/cmd/build/helmify/static/README.md @@ -221,9 +221,12 @@ information._ | audit.readinessTimeout | Timeout in seconds for audit's readiness probe | `1` | | audit.livenessTimeout | Timeout in seconds for the audit's liveness probe | `1` | | audit.logLevel | The minimum log level for audit, takes precedence over `logLevel` when specified | `null` | -| enableViolationExport | (alpha) Enable exporting violations to external systems | `false` | +| enableViolationExport | (alpha) Enable exporting violations to external systems | `false` | | audit.connection | (alpha) Connection name for exporting audit violation messages | `audit-connection` | | audit.channel | (alpha) Channel name for exporting audit violation messages | `audit-channel` | +| audit.exportVolume | (alpha) Volume for audit pod to export violations. | nil | +| audit.exportVolumeMount | (alpha) VolumeMount for audit pod manager container to export violations. 
| nil | +| audit.exportSidecar | (alpha) Sidecar container to read violations from disk. | nil | | replicas | The number of Gatekeeper replicas to deploy for the webhook | `3` | | podAnnotations | The annotations to add to the Gatekeeper pods | `container.seccomp.security.alpha.kubernetes.io/manager: runtime/default` | | podLabels | The labels to add to the Gatekeeper pods | `{}` | diff --git a/manifest_staging/charts/gatekeeper/README.md b/manifest_staging/charts/gatekeeper/README.md index 44681b7f891..a26505991fa 100644 --- a/manifest_staging/charts/gatekeeper/README.md +++ b/manifest_staging/charts/gatekeeper/README.md @@ -221,9 +221,12 @@ information._ | audit.readinessTimeout | Timeout in seconds for audit's readiness probe | `1` | | audit.livenessTimeout | Timeout in seconds for the audit's liveness probe | `1` | | audit.logLevel | The minimum log level for audit, takes precedence over `logLevel` when specified | `null` | -| enableViolationExport | (alpha) Enable exporting violations to external systems | `false` | +| enableViolationExport | (alpha) Enable exporting violations to external systems | `false` | | audit.connection | (alpha) Connection name for exporting audit violation messages | `audit-connection` | | audit.channel | (alpha) Channel name for exporting audit violation messages | `audit-channel` | +| audit.exportVolume | (alpha) Volume for audit pod to export violations. | nil | +| audit.exportVolumeMount | (alpha) VolumeMount for audit pod manager container to export violations. | nil | +| audit.exportSidecar | (alpha) Sidecar container to read violations from disk. 
| nil | | replicas | The number of Gatekeeper replicas to deploy for the webhook | `3` | | podAnnotations | The annotations to add to the Gatekeeper pods | `container.seccomp.security.alpha.kubernetes.io/manager: runtime/default` | | podLabels | The labels to add to the Gatekeeper pods | `{}` | diff --git a/website/docs/export.md b/website/docs/export.md index 5d235f45750..cacc0fb833a 100644 --- a/website/docs/export.md +++ b/website/docs/export.md @@ -223,71 +223,59 @@ data: **Note:** Make sure the fake-reader image is available in your preferred registry or cluster. -2. Deploy Gatekeeper charts with needed configuration. +2. Create `values.yaml` with the following variables. - You can use below command that uses a rule defined in [Makefile](https://github.com/open-policy-agent/gatekeeper/blob/master/Makefile) to deploy gatekeeper that mounts emptyDir with sidecar reader container. - - ```bash - make deploy IMG= EXPORT_BACKEND=disk FAKE_READER_IMAGE= FAKE_READER_IMAGE_PULL_POLICY= + ```yaml + audit: + exportVolume: + + exportVolumeMount: + + exportSidecar: + ``` - Alternatively, you can follow the below steps to manually update Gatekeeper and configure export. - - 1. Update `gatekeeper-audit` deployment to add `emptyDir` volume. - - ```yaml - volumes: - - emptyDir: {} - name: tmp-violations - ``` - - :::tip - You can replace emptyDir to use PVC or any other types of volumes. - ::: - - 2. Update `gatekeeper-audit` deployment to add `volumeMount` to `manager` container. - - ```yaml - volumeMounts: - - mountPath: /tmp/violations - name: tmp-violations - ``` - - 3. Update `gatekeeper-audit` deployment to add a `sidecar` reader container. 
- - ```yaml - - name: go-sub - image: - imagePullPolicy: - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 999 - runAsNonRoot: true - runAsUser: 1000 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /tmp/violations - name: tmp-violations - ``` - - 4. Update `gatekeeper-audit` deployment to add following flags - - ```yaml - ... - - --enable-violation-export=true - - --audit-connection=audit - - --audit-channel=audit - ... - ``` - - **Note:** Verify that after the audit pod is running there is a sidecar running along side `manager` container after deploying Gatekeeper. + Here is the default `values.yaml` that you can use. -3. Create connection config to establish a connection. + ```yaml + audit: + exportVolume: + name: tmp-violations + emptyDir: {} + exportVolumeMount: + path: /tmp/violations + exportSidecar: + name: go-sub + image: fake-reader:latest + imagePullPolicy: Always + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 999 + runAsNonRoot: true + runAsUser: 1000 + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /tmp/violations + name: tmp-violations + ``` + +3. Deploy Gatekeeper charts with `values.yaml`. + + ```shell + helm upgrade --install gatekeeper gatekeeper/gatekeeper --namespace gatekeeper-system \ + ... + --set enableViolationExport=true \ + --set audit.connection=audit-connection \ + --set audit.channel=audit-channel \ + --values /path/to/values.yaml + ``` + +4. Create connection config to establish a connection. 
```shell kubectl apply -f - < Date: Tue, 25 Mar 2025 19:39:54 +0000 Subject: [PATCH 19/33] updating logs and fixing indent error Signed-off-by: Jaydip Gabani --- pkg/export/disk/disk.go | 2 +- website/docs/export.md | 46 ++++++++++++++++++++--------------------- 2 files changed, 24 insertions(+), 24 deletions(-) diff --git a/pkg/export/disk/disk.go b/pkg/export/disk/disk.go index 2b8e9301eb6..63b22c1a866 100644 --- a/pkg/export/disk/disk.go +++ b/pkg/export/disk/disk.go @@ -201,7 +201,7 @@ func (conn *Connection) handleAuditStart(auditID string, topic string) error { if err != nil { return fmt.Errorf("failed to acquire lock: %w", err) } - log.Info("Writing latest violations at ") + log.Info("Writing latest violations in", "filename", conn.File.Name()) return nil } diff --git a/website/docs/export.md b/website/docs/export.md index cacc0fb833a..5d9bea834c7 100644 --- a/website/docs/export.md +++ b/website/docs/export.md @@ -239,29 +239,29 @@ data: ```yaml audit: - exportVolume: - name: tmp-violations - emptyDir: {} - exportVolumeMount: - path: /tmp/violations - exportSidecar: - name: go-sub - image: fake-reader:latest - imagePullPolicy: Always - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 999 - runAsNonRoot: true - runAsUser: 1000 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /tmp/violations - name: tmp-violations + exportVolume: + name: tmp-violations + emptyDir: {} + exportVolumeMount: + path: /tmp/violations + exportSidecar: + name: go-sub + image: fake-reader:latest + imagePullPolicy: Always + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 999 + runAsNonRoot: true + runAsUser: 1000 + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /tmp/violations + name: tmp-violations ``` 3. Deploy Gatekeeper charts with `values.yaml`. 
From 319b94219d77e3c4f347a1a0ea2dd5997799966d Mon Sep 17 00:00:00 2001 From: Jaydip Gabani Date: Tue, 25 Mar 2025 21:36:17 +0000 Subject: [PATCH 20/33] fixing audit helm charts to include volumes and sidecar Signed-off-by: Jaydip Gabani --- cmd/build/helmify/main.go | 3 ++- .../templates/gatekeeper-audit-deployment.yaml | 10 ++++++++++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/cmd/build/helmify/main.go b/cmd/build/helmify/main.go index 074c09d5127..e322ba7830c 100644 --- a/cmd/build/helmify/main.go +++ b/cmd/build/helmify/main.go @@ -141,7 +141,8 @@ func (ks *kindSet) Write() error { obj = "{{- if not .Values.disableAudit }}\n" + obj + "{{- end }}\n" obj = strings.Replace(obj, " labels:", " labels:\n {{- include \"gatekeeper.podLabels\" . | nindent 8 }}\n {{- include \"audit.podLabels\" . | nindent 8 }}\n {{- include \"gatekeeper.commonLabels\" . | nindent 8 }}", 1) obj = strings.Replace(obj, " priorityClassName: system-cluster-critical", " {{- if .Values.audit.priorityClassName }}\n priorityClassName: {{ .Values.audit.priorityClassName }}\n {{- end }}", 1) - obj = strings.Replace(obj, " - emptyDir: {}", " {{- if .Values.audit.writeToRAMDisk }}\n - emptyDir:\n medium: Memory\n {{ else }}\n - emptyDir: {}\n {{- end }}", 1) + obj = strings.Replace(obj, " name: tmp-volume", " name: tmp-volume\n {{- if .Values.audit.exportVolumeMount }}\n - mountPath: {{ .Values.audit.exportVolumeMount.path }}\n name: {{ .Values.audit.exportVolume.name }}\n {{- end }}\n {{ if .Values.audit.exportSidecar }}\n - {{ toYaml .Values.audit.exportSidecar | nindent 8 }}\n {{- end }}", 1) + obj = strings.Replace(obj, " - emptyDir: {}", " {{- if .Values.audit.exportVolume }}\n - {{- toYaml .Values.audit.exportVolume | nindent 8 }}\n {{- end }}\n {{- if .Values.audit.writeToRAMDisk }}\n - emptyDir:\n medium: Memory\n {{ else }}\n - emptyDir: {}\n {{- end }}", 1) } if name == "gatekeeper-manager-role" && kind == "Role" { diff --git 
a/manifest_staging/charts/gatekeeper/templates/gatekeeper-audit-deployment.yaml b/manifest_staging/charts/gatekeeper/templates/gatekeeper-audit-deployment.yaml index 552beb1fa7a..b35e08f5f18 100644 --- a/manifest_staging/charts/gatekeeper/templates/gatekeeper-audit-deployment.yaml +++ b/manifest_staging/charts/gatekeeper/templates/gatekeeper-audit-deployment.yaml @@ -162,6 +162,13 @@ spec: readOnly: true - mountPath: /tmp/audit name: tmp-volume + {{- if .Values.audit.exportVolumeMount }} + - mountPath: {{ .Values.audit.exportVolumeMount.path }} + name: {{ .Values.audit.exportVolume.name }} + {{- end }} + {{ if .Values.audit.exportSidecar }} + - {{ toYaml .Values.audit.exportSidecar | nindent 8 }} + {{- end }} dnsPolicy: {{ .Values.audit.dnsPolicy }} hostNetwork: {{ .Values.audit.hostNetwork }} imagePullSecrets: @@ -182,6 +189,9 @@ spec: secret: defaultMode: 420 secretName: gatekeeper-webhook-server-cert + {{- if .Values.audit.exportVolume }} + - {{- toYaml .Values.audit.exportVolume | nindent 8 }} + {{- end }} {{- if .Values.audit.writeToRAMDisk }} - emptyDir: medium: Memory From 24609972064c7b27414fbcb7d5cb78c36919363c Mon Sep 17 00:00:00 2001 From: Jaydip Gabani Date: Wed, 26 Mar 2025 14:41:18 +0000 Subject: [PATCH 21/33] updating docs, addressing nites Signed-off-by: Jaydip Gabani --- .github/workflows/dapr-export.yaml | 2 +- Makefile | 10 ++++++---- test/export/fake-reader/Dockerfile | 2 +- website/docs/export.md | 11 +++++++++-- 4 files changed, 17 insertions(+), 8 deletions(-) diff --git a/.github/workflows/dapr-export.yaml b/.github/workflows/dapr-export.yaml index 483157edd9b..601657a5c35 100644 --- a/.github/workflows/dapr-export.yaml +++ b/.github/workflows/dapr-export.yaml @@ -50,7 +50,7 @@ jobs: kind load docker-image --name kind gatekeeper-e2e:latest gatekeeper-crds:latest kubectl create ns gatekeeper-system make e2e-publisher-deploy - make e2e-helm-deploy HELM_REPO=gatekeeper-e2e HELM_CRD_REPO=gatekeeper-crds HELM_RELEASE=latest ENABLE_EXPORT=true 
LOG_LEVEL=DEBUG EXPORT_BACKEND=dapr + make e2e-helm-deploy HELM_REPO=gatekeeper-e2e HELM_CRD_REPO=gatekeeper-crds HELM_RELEASE=latest ENABLE_EXPORT=true LOG_LEVEL=DEBUG AUDIT_CHANNEL=audit-channel EXPORT_BACKEND=dapr make test-e2e ENABLE_EXPORT_TESTS=1 - name: Save logs diff --git a/Makefile b/Makefile index a0cc41b8fbb..1130b99ee2e 100644 --- a/Makefile +++ b/Makefile @@ -37,14 +37,15 @@ NODE_VERSION ?= 16-bullseye-slim YQ_VERSION ?= 4.30.6 HELM_ARGS ?= -HELM_DAPR_ARGS := --set-string auditPodAnnotations.dapr\\.io/enabled=true \ +HELM_DAPR_EXPORT_ARGS := --set-string auditPodAnnotations.dapr\\.io/enabled=true \ --set-string auditPodAnnotations.dapr\\.io/app-id=audit \ --set-string auditPodAnnotations.dapr\\.io/metrics-port=9999 \ +HELM_DISK_EXPORT_ARGS := -f /tmp/values.yaml \ + HELM_EXPORT_ARGS := --set enableViolationExport=${ENABLE_EXPORT} \ --set audit.connection=${AUDIT_CONNECTION} \ --set audit.channel=${AUDIT_CHANNEL} \ - -f /tmp/values.yaml \ HELM_EXTRA_ARGS := --set image.repository=${HELM_REPO} \ --set image.crdRepository=${HELM_CRD_REPO} \ @@ -289,8 +290,9 @@ ifeq ($(ENABLE_EXPORT),true) ./.staging/helm/linux-amd64/helm install manifest_staging/charts/gatekeeper --name-template=gatekeeper \ --namespace ${GATEKEEPER_NAMESPACE} \ --debug --wait \ - $(if $(filter disk,$(EXPORT_BACKEND)),$(HELM_EXPORT_ARGS)) \ - $(if $(filter dapr,$(EXPORT_BACKEND)),$(HELM_DAPR_ARGS)) \ + $(HELM_EXPORT_ARGS) \ + $(if $(filter disk,$(EXPORT_BACKEND)),$(HELM_DISK_EXPORT_ARGS)) \ + $(if $(filter dapr,$(EXPORT_BACKEND)),$(HELM_DAPR_EXPORT_ARGS)) \ $(HELM_EXTRA_ARGS) else ./.staging/helm/linux-amd64/helm install manifest_staging/charts/gatekeeper --name-template=gatekeeper \ diff --git a/test/export/fake-reader/Dockerfile b/test/export/fake-reader/Dockerfile index 0e45c91a6cf..54b7845a2d2 100644 --- a/test/export/fake-reader/Dockerfile +++ b/test/export/fake-reader/Dockerfile @@ -1,4 +1,4 @@ -FROM --platform=$BUILDPLATFORM 
golang:1.23-bookworm@sha256:462f68e1109cc0415f58ba591f11e650b38e193fddc4a683a3b77d29be8bfb2c as builder +FROM --platform=$BUILDPLATFORM golang:1.23-bookworm@sha256:462f68e1109cc0415f58ba591f11e650b38e193fddc4a683a3b77d29be8bfb2c AS builder ARG TARGETPLATFORM ARG TARGETOS diff --git a/website/docs/export.md b/website/docs/export.md index 5d9bea834c7..9f49678f869 100644 --- a/website/docs/export.md +++ b/website/docs/export.md @@ -268,13 +268,20 @@ data: ```shell helm upgrade --install gatekeeper gatekeeper/gatekeeper --namespace gatekeeper-system \ - ... --set enableViolationExport=true \ --set audit.connection=audit-connection \ --set audit.channel=audit-channel \ --values /path/to/values.yaml ``` + **Note**: After the audit pod starts, verify that it contains two running containers. + + ```shell + kubectl get pod -n gatekeeper-system + NAME READY STATUS RESTARTS AGE + gatekeeper-audit-6865f5f56d-vclxw 2/2 Running 0 12s + ``` + 4. Create connection config to establish a connection. ```shell @@ -282,7 +289,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: audit + name: audit-connection namespace: gatekeeper-system data: driver: "disk" From 5926e7a7ec2b0b053e8ed4b303c13e6ad993db0c Mon Sep 17 00:00:00 2001 From: Jaydip Gabani Date: Wed, 26 Mar 2025 16:50:20 +0000 Subject: [PATCH 22/33] putting audit start/end message export behind export flag Signed-off-by: Jaydip Gabani --- pkg/audit/manager.go | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/pkg/audit/manager.go b/pkg/audit/manager.go index 72ff376e6f6..17ffd3b29e4 100644 --- a/pkg/audit/manager.go +++ b/pkg/audit/manager.go @@ -260,9 +260,11 @@ func (am *Manager) audit(ctx context.Context) error { am.log = log.WithValues(logging.AuditID, timestamp) logStart(am.log) exportErrorMap := make(map[string]error) - if err := am.exportSystem.Publish(context.Background(), *auditConnection, *auditChannel, exportutil.ExportMsg{Message: exportutil.AuditStartedMsg, ID: timestamp}); err 
!= nil { - exportErrorMap[strings.Split(err.Error(), ":")[0]] = err - am.log.Error(err, "failed to export audit start message") + if *exportController.ExportEnabled { + if err := am.exportSystem.Publish(context.Background(), *auditConnection, *auditChannel, exportutil.ExportMsg{Message: exportutil.AuditStartedMsg, ID: timestamp}); err != nil { + exportErrorMap[strings.Split(err.Error(), ":")[0]] = err + am.log.Error(err, "failed to export audit start message") + } } // record audit latency defer func() { @@ -275,8 +277,10 @@ func (am *Manager) audit(ctx context.Context) error { if err := am.reporter.reportRunEnd(endTime); err != nil { am.log.Error(err, "failed to report run end time") } - if err := am.exportSystem.Publish(context.Background(), *auditConnection, *auditChannel, exportutil.ExportMsg{Message: exportutil.AuditCompletedMsg, ID: timestamp}); err != nil { - exportErrorMap[strings.Split(err.Error(), ":")[0]] = err + if *exportController.ExportEnabled { + if err := am.exportSystem.Publish(context.Background(), *auditConnection, *auditChannel, exportutil.ExportMsg{Message: exportutil.AuditCompletedMsg, ID: timestamp}); err != nil { + exportErrorMap[strings.Split(err.Error(), ":")[0]] = err + } } for _, v := range exportErrorMap { am.log.Error(v, "failed to export audit violation") From da118f5941f58a0ae7038e22ada06e955a691a74 Mon Sep 17 00:00:00 2001 From: Jaydip Gabani Date: Wed, 26 Mar 2025 21:05:58 +0000 Subject: [PATCH 23/33] adding connection config for disk export to helm charts Signed-off-by: Jaydip Gabani --- .github/workflows/disk-export.yaml | 5 ++- Makefile | 12 +++---- cmd/build/helmify/static/README.md | 3 +- ...tekeeper-audit-violation-expot-config.yaml | 15 +++++++++ manifest_staging/charts/gatekeeper/README.md | 3 +- ...tekeeper-audit-violation-expot-config.yaml | 15 +++++++++ website/docs/export.md | 31 +++++-------------- 7 files changed, 50 insertions(+), 34 deletions(-) create mode 100644 
cmd/build/helmify/static/templates/gatekeeper-audit-violation-expot-config.yaml create mode 100644 manifest_staging/charts/gatekeeper/templates/gatekeeper-audit-violation-expot-config.yaml diff --git a/.github/workflows/disk-export.yaml b/.github/workflows/disk-export.yaml index 4473898cc9c..b3f9b3a03ce 100644 --- a/.github/workflows/disk-export.yaml +++ b/.github/workflows/disk-export.yaml @@ -39,10 +39,9 @@ jobs: make e2e-build-load-externaldata-image + make e2e-reader-build-image PLATFORM="linux/amd64" kind load docker-image --name kind \ - gatekeeper-e2e:latest - - make e2e-reader-build-load-image + gatekeeper-e2e:latest fake-reader:latest make deploy IMG=gatekeeper-e2e:latest USE_LOCAL_IMG=true GENERATE_VAP=true GENERATE_VAPBINDING=true EXPORT_BACKEND=disk diff --git a/Makefile b/Makefile index 1130b99ee2e..8af261ebf1d 100644 --- a/Makefile +++ b/Makefile @@ -41,11 +41,14 @@ HELM_DAPR_EXPORT_ARGS := --set-string auditPodAnnotations.dapr\\.io/enabled=true --set-string auditPodAnnotations.dapr\\.io/app-id=audit \ --set-string auditPodAnnotations.dapr\\.io/metrics-port=9999 \ -HELM_DISK_EXPORT_ARGS := -f /tmp/values.yaml \ +HELM_DISK_EXPORT_ARGS := --set audit.exportVolumeMount.path=${EXPORT_DISK_PATH} \ + --set audit.exportConfig.maxAuditResults=${MAX_AUDIT_RESULTS} \ + -f /tmp/values.yaml \ HELM_EXPORT_ARGS := --set enableViolationExport=${ENABLE_EXPORT} \ --set audit.connection=${AUDIT_CONNECTION} \ --set audit.channel=${AUDIT_CHANNEL} \ + --set exportBackend=${EXPORT_BACKEND} \ HELM_EXTRA_ARGS := --set image.repository=${HELM_REPO} \ --set image.crdRepository=${HELM_CRD_REPO} \ @@ -168,8 +171,6 @@ HELM_EXPORT_VARIABLES := "audit:\ \n exportVolume:\ \n name: tmp-violations\ \n emptyDir: {}\ -\n exportVolumeMount:\ -\n path: /tmp/violations\ \n exportSidecar:\ \n name: go-sub\ \n image: ${FAKE_READER_IMAGE}\ @@ -338,9 +339,8 @@ e2e-publisher-deploy: kubectl get secret redis --namespace=default -o yaml | sed 's/namespace: .*/namespace: gatekeeper-system/' | 
kubectl apply -f - kubectl apply -f test/export/fake-subscriber/manifest/publish-components.yaml -e2e-reader-build-load-image: - docker buildx build --platform="linux/amd64" -t ${FAKE_READER_IMAGE} --load -f test/export/fake-reader/Dockerfile test/export/fake-reader - kind load docker-image --name kind ${FAKE_READER_IMAGE} +e2e-reader-build-image: + docker buildx build --platform="$(PLATFORM)" -t ${FAKE_READER_IMAGE} --load -f test/export/fake-reader/Dockerfile test/export/fake-reader # Build manager binary manager: generate diff --git a/cmd/build/helmify/static/README.md b/cmd/build/helmify/static/README.md index a26505991fa..d484f41b2f0 100644 --- a/cmd/build/helmify/static/README.md +++ b/cmd/build/helmify/static/README.md @@ -225,8 +225,9 @@ information._ | audit.connection | (alpha) Connection name for exporting audit violation messages | `audit-connection` | | audit.channel | (alpha) Channel name for exporting audit violation messages | `audit-channel` | | audit.exportVolume | (alpha) Volume for audit pod to export violations. | nil | -| audit.exportVolumeMount | (alpha) VolumeMpunt for audit pod manager container to export violations. | nil | +| audit.exportVolumeMount.path | (alpha) VolumeMount for audit pod manager container to export violations and sidecar container to read from. | nil | | audit.exportSidecar | (alpha) Sidecar container to read violations from disk. | nil | +| audit.exportConfig.maxAuditResults | (alpha) Maximum number of audit results that can be stored in the export path. 
| nil | | replicas | The number of Gatekeeper replicas to deploy for the webhook | `3` | | podAnnotations | The annotations to add to the Gatekeeper pods | `container.seccomp.security.alpha.kubernetes.io/manager: runtime/default` | | podLabels | The labels to add to the Gatekeeper pods | `{}` | diff --git a/cmd/build/helmify/static/templates/gatekeeper-audit-violation-expot-config.yaml b/cmd/build/helmify/static/templates/gatekeeper-audit-violation-expot-config.yaml new file mode 100644 index 00000000000..4988172e91b --- /dev/null +++ b/cmd/build/helmify/static/templates/gatekeeper-audit-violation-expot-config.yaml @@ -0,0 +1,15 @@ +--- +{{- if and .Values.enableViolationExport (eq .Values.exportBackend "disk" ) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: '{{ .Values.audit.connection }}' + namespace: '{{ .Release.Namespace }}' +data: + driver: '{{ .Values.exportBackend }}' + config: | + { + "path": "{{ .Values.audit.exportVolumeMount.path }}", + "maxAuditResults": {{ .Values.audit.exportConfig.maxAuditResults }} + } +{{- end }} diff --git a/manifest_staging/charts/gatekeeper/README.md b/manifest_staging/charts/gatekeeper/README.md index a26505991fa..d484f41b2f0 100644 --- a/manifest_staging/charts/gatekeeper/README.md +++ b/manifest_staging/charts/gatekeeper/README.md @@ -225,8 +225,9 @@ information._ | audit.connection | (alpha) Connection name for exporting audit violation messages | `audit-connection` | | audit.channel | (alpha) Channel name for exporting audit violation messages | `audit-channel` | | audit.exportVolume | (alpha) Volume for audit pod to export violations. | nil | -| audit.exportVolumeMount | (alpha) VolumeMpunt for audit pod manager container to export violations. | nil | +| audit.exportVolumeMount.path | (alpha) VolumeMount for audit pod manager container to export violations and sidecar container to read from. | nil | | audit.exportSidecar | (alpha) Sidecar container to read violations from disk. 
| nil | +| audit.exportConfig.maxAuditResults | (alpha) Maximum number of audit results that can be stored in the export path. | nil | | replicas | The number of Gatekeeper replicas to deploy for the webhook | `3` | | podAnnotations | The annotations to add to the Gatekeeper pods | `container.seccomp.security.alpha.kubernetes.io/manager: runtime/default` | | podLabels | The labels to add to the Gatekeeper pods | `{}` | diff --git a/manifest_staging/charts/gatekeeper/templates/gatekeeper-audit-violation-expot-config.yaml b/manifest_staging/charts/gatekeeper/templates/gatekeeper-audit-violation-expot-config.yaml new file mode 100644 index 00000000000..4988172e91b --- /dev/null +++ b/manifest_staging/charts/gatekeeper/templates/gatekeeper-audit-violation-expot-config.yaml @@ -0,0 +1,15 @@ +--- +{{- if and .Values.enableViolationExport (eq .Values.exportBackend "disk" ) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: '{{ .Values.audit.connection }}' + namespace: '{{ .Release.Namespace }}' +data: + driver: '{{ .Values.exportBackend }}' + config: | + { + "path": "{{ .Values.audit.exportVolumeMount.path }}", + "maxAuditResults": {{ .Values.audit.exportConfig.maxAuditResults }} + } +{{- end }} diff --git a/website/docs/export.md b/website/docs/export.md index 9f49678f869..638285b1443 100644 --- a/website/docs/export.md +++ b/website/docs/export.md @@ -221,6 +221,10 @@ data: docker buildx build -t --load -f test/export/fake-reader/Dockerfile test/export/fake-reader ``` + :::tip + You can use `make e2e-reader-build-image FAKE_READER_IMAGE=` defined in [Makefile](https://github.com/open-policy-agent/gatekeeper/tree/master/Makefile) + ::: + **Note:** Make sure the fake-reader image is available in your preferred registry or cluster. 2. Create `values.yaml` with the following variables. 
@@ -271,6 +275,8 @@ data: --set enableViolationExport=true \ --set audit.connection=audit-connection \ --set audit.channel=audit-channel \ + --set audit.exportConfig.maxAuditResults=3 \ + --set exportBackend=disk \ --values /path/to/values.yaml ``` @@ -282,28 +288,7 @@ data: gatekeeper-audit-6865f5f56d-vclxw 2/2 Running 0 12s ``` -4. Create connection config to establish a connection. - - ```shell - kubectl apply -f - < Date: Wed, 2 Apr 2025 17:04:50 +0000 Subject: [PATCH 24/33] cleaning up disk driver and fixing permission issue Signed-off-by: Jaydip Gabani --- .../export/export_config_controller.go | 2 +- pkg/export/disk/disk.go | 186 +++++++++--------- pkg/export/disk/disk_test.go | 8 +- 3 files changed, 100 insertions(+), 96 deletions(-) diff --git a/pkg/controller/export/export_config_controller.go b/pkg/controller/export/export_config_controller.go index 4d7486a5a96..43c7cb155d5 100644 --- a/pkg/controller/export/export_config_controller.go +++ b/pkg/controller/export/export_config_controller.go @@ -124,7 +124,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, request reconcile.Request) ( err = r.system.UpsertConnection(ctx, config, request.Name, cfg.Data["driver"]) if err != nil { - return reconcile.Result{Requeue: true}, err + return reconcile.Result{}, err } log.Info("Connection upsert successful", "name", request.Name, "driver", cfg.Data["driver"]) diff --git a/pkg/export/disk/disk.go b/pkg/export/disk/disk.go index 63b22c1a866..f62103ad2b2 100644 --- a/pkg/export/disk/disk.go +++ b/pkg/export/disk/disk.go @@ -3,10 +3,12 @@ package disk import ( "context" "encoding/json" + "errors" "fmt" "os" "path" "path/filepath" + "sort" "strings" "syscall" "time" @@ -47,24 +49,9 @@ var Connections = &Writer{ var log = logf.Log.WithName("disk-driver").WithValues(logging.Process, "export") func (r *Writer) CreateConnection(_ context.Context, connectionName string, config interface{}) error { - cfg, ok := config.(map[string]interface{}) - if !ok { - return 
fmt.Errorf("invalid config format") - } - - path, pathOk := cfg[violationPath].(string) - if !pathOk { - return fmt.Errorf("missing or invalid 'path' for connection %s", connectionName) - } - if err := validatePath(path); err != nil { - return fmt.Errorf("invalid path: %w", err) - } - maxResults, maxResultsOk := cfg[maxAuditResults].(float64) - if !maxResultsOk { - return fmt.Errorf("missing or invalid 'maxAuditResults' for connection %s", connectionName) - } - if maxResults > maxAllowedAuditRuns { - return fmt.Errorf("maxAuditResults cannot be greater than %d", maxAllowedAuditRuns) + path, maxResults, err := unmarshalConfig(config) + if err != nil { + return fmt.Errorf("error creating connection %s: %w", connectionName, err) } r.openConnections[connectionName] = Connection{ @@ -75,45 +62,31 @@ func (r *Writer) CreateConnection(_ context.Context, connectionName string, conf } func (r *Writer) UpdateConnection(_ context.Context, connectionName string, config interface{}) error { - cfg, ok := config.(map[string]interface{}) - if !ok { - return fmt.Errorf("invalid config format") - } - conn, exists := r.openConnections[connectionName] if !exists { return fmt.Errorf("connection %s for disk driver not found", connectionName) } - if path, ok := cfg[violationPath].(string); ok { - if conn.Path != path { - if err := validatePath(path); err != nil { - return fmt.Errorf("invalid path: %w", err) - } - if conn.File != nil { - if err := conn.unlockAndCloseFile(); err != nil { - return fmt.Errorf("connection update failed, error closing file: %w", err) - } - } - if err := os.RemoveAll(conn.Path); err != nil { - return fmt.Errorf("connection update failed, error deleting violations stored at old path: %w", err) - } - conn.Path = path - conn.File = nil - } - } else { - return fmt.Errorf("missing or invalid 'path' for connection %s", connectionName) + path, maxResults, err := unmarshalConfig(config) + if err != nil { + return fmt.Errorf("error creating connection %s: %w", 
connectionName, err) } - if maxResults, ok := cfg[maxAuditResults].(float64); ok { - if maxResults > maxAllowedAuditRuns { - return fmt.Errorf("maxAuditResults cannot be greater than %d", maxAllowedAuditRuns) + if conn.Path != path { + if conn.File != nil { + if err := conn.unlockAndCloseFile(); err != nil { + return fmt.Errorf("connection update failed, error closing file: %w", err) + } } - conn.MaxAuditResults = int(maxResults) - } else { - return fmt.Errorf("missing or invalid 'maxAuditResults' for connection %s", connectionName) + if err := os.RemoveAll(conn.Path); err != nil { + return fmt.Errorf("connection update failed, error deleting violations stored at old path: %w", err) + } + conn.Path = path + conn.File = nil } + conn.MaxAuditResults = int(maxResults) + r.openConnections[connectionName] = conn return nil } @@ -182,13 +155,17 @@ func (conn *Connection) handleAuditStart(auditID string, topic string) error { // Replace ':' with '_' to avoid issues with file names in windows conn.currentAuditRun = strings.ReplaceAll(auditID, ":", "_") - // Ensure the directory exists dir := path.Join(conn.Path, topic) - if err := os.MkdirAll(dir, 0o755); err != nil { - return fmt.Errorf("failed to create directories: %w", err) + if err := os.MkdirAll(dir, 0o777); err != nil { + return fmt.Errorf("failed to create directories: %w", err) + } + + // Set the dir permissions to make sure reader can modify files if need be after the lock is released. 
+ if err := os.Chmod(dir, 0o777); err != nil { + return fmt.Errorf("failed to set directory permissions: %w", err) } - file, err := os.OpenFile(path.Join(dir, appendExtension(conn.currentAuditRun, "txt")), os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o644) + file, err := os.OpenFile(path.Join(dir, appendExtension(conn.currentAuditRun, "txt")), os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o666) if err != nil { return fmt.Errorf("failed to open file: %w", err) } @@ -217,6 +194,10 @@ func (conn *Connection) handleAuditEnd(topic string) error { if err := os.Rename(path.Join(conn.Path, topic, appendExtension(conn.currentAuditRun, "txt")), readyFilePath); err != nil { return fmt.Errorf("failed to rename file: %w, %s", err, conn.currentAuditRun) } + // Set the file permissions to make sure reader can modify files if need be after the lock is released. + if err := os.Chmod(readyFilePath, 0o777); err != nil { + return fmt.Errorf("failed to set file permissions: %w", err) + } log.Info("File renamed", "filename", readyFilePath) return conn.cleanupOldAuditFiles(topic) @@ -241,50 +222,50 @@ func (conn *Connection) unlockAndCloseFile() error { func (conn *Connection) cleanupOldAuditFiles(topic string) error { dirPath := path.Join(conn.Path, topic) - - for { - earliestFile, files, err := getEarliestFile(dirPath) - if err != nil { - return fmt.Errorf("error getting earliest file: %w", err) - } - if len(files) <= conn.MaxAuditResults { - break - } - if err := os.Remove(earliestFile); err != nil { - return fmt.Errorf("error removing file: %w", err) - } + files, err := getFilesSortedByModTimeAsc(dirPath) + if err != nil { + return fmt.Errorf("failed removing older audit files, error getting files sorted by mod time: %w", err) } - - return nil -} - -func getEarliestFile(dirPath string) (string, []string, error) { - var earliestFile string - var earliestModTime time.Time - var files []string - - err := filepath.Walk(dirPath, func(path string, info os.FileInfo, err error) error { - if err != nil { - 
return err - } - if !info.IsDir() && (earliestFile == "" || info.ModTime().Before(earliestModTime)) { - earliestFile = path - earliestModTime = info.ModTime() - } - if !info.IsDir() { - files = append(files, path) + var errs []error + for i := 0; i < len(files)-conn.MaxAuditResults; i++ { + if e := os.Remove(files[i]); e != nil { + errs = append(errs, fmt.Errorf("error removing file: %w", e)) } - return nil - }) - if err != nil { - return "", files, err } - if earliestFile == "" { - return "", files, nil - } + return errors.Join(errs...) +} - return earliestFile, files, nil +func getFilesSortedByModTimeAsc(dirPath string) ([]string, error) { + type fileInfo struct { + path string + modTime time.Time + } + var filesInfo []fileInfo + + err := filepath.Walk(dirPath, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.IsDir() { + filesInfo = append(filesInfo, fileInfo{path: path, modTime: info.ModTime()}) + } + return nil + }) + if err != nil { + return nil, err + } + + sort.Slice(filesInfo, func(i, j int) bool { + return filesInfo[i].modTime.Before(filesInfo[j].modTime) + }) + + var sortedFiles []string + for _, fi := range filesInfo { + sortedFiles = append(sortedFiles, fi.path) + } + + return sortedFiles, nil } func appendExtension(name string, ext string) string { @@ -300,7 +281,7 @@ func validatePath(path string) error { return fmt.Errorf("path must not contain '..', dir traversal is not allowed") } // validate if the path is writable - if err := os.MkdirAll(path, 0o755); err != nil { + if err := os.MkdirAll(path, 0o777); err != nil { return fmt.Errorf("failed to create directory: %w", err) } info, err := os.Stat(path) @@ -312,3 +293,26 @@ func validatePath(path string) error { } return nil } + +func unmarshalConfig(config interface{}) (string, float64, error) { + cfg, ok := config.(map[string]interface{}) + if !ok { + return "", 0.0, fmt.Errorf("invalid config format") + } + + path, pathOk := 
cfg[violationPath].(string) + if !pathOk { + return "", 0.0, fmt.Errorf("missing or invalid 'path'") + } + if err := validatePath(path); err != nil { + return "", 0.0, fmt.Errorf("invalid path: %w", err) + } + maxResults, maxResultsOk := cfg[maxAuditResults].(float64) + if !maxResultsOk { + return "", 0.0, fmt.Errorf("missing or invalid 'maxAuditResults'") + } + if maxResults > maxAllowedAuditRuns { + return "", 0.0, fmt.Errorf("maxAuditResults cannot be greater than the maximum allowed audit runs: %d", maxAllowedAuditRuns) + } + return path, maxResults, nil +} \ No newline at end of file diff --git a/pkg/export/disk/disk_test.go b/pkg/export/disk/disk_test.go index bb205e00d9f..911aa2375f1 100644 --- a/pkg/export/disk/disk_test.go +++ b/pkg/export/disk/disk_test.go @@ -766,7 +766,7 @@ func TestCleanupOldAuditFiles(t *testing.T) { } } -func TestGetEarliestFile(t *testing.T) { +func TestGetFilesSortedByModTimeAsc(t *testing.T) { tests := []struct { name string setup func(dir string) error @@ -842,7 +842,7 @@ func TestGetEarliestFile(t *testing.T) { t.Errorf("Setup failed: %v", err) } } - earliestFile, files, err := getEarliestFile(dir) + files, err := getFilesSortedByModTimeAsc(dir) if (err != nil) != tt.expectError { t.Errorf("getEarliestFile() error = %v, expectError %v", err, tt.expectError) } @@ -850,8 +850,8 @@ func TestGetEarliestFile(t *testing.T) { if len(files) != tt.expectedFiles { t.Errorf("Expected %d files, got %d", tt.expectedFiles, len(files)) } - if tt.expectedFile != "" && !strings.HasSuffix(earliestFile, tt.expectedFile) { - t.Errorf("Expected earliest file %s, got %s", tt.expectedFile, earliestFile) + if tt.expectedFile != "" && !strings.HasSuffix(files[0], tt.expectedFile) { + t.Errorf("Expected earliest file %s, got %s", tt.expectedFile, files[0]) } } }) From 601095b55fbc2c7e4ac74afc1bebf13a9b68d1ba Mon Sep 17 00:00:00 2001 From: Jaydip Gabani Date: Wed, 2 Apr 2025 18:05:04 +0000 Subject: [PATCH 25/33] fixing tests Signed-off-by: Jaydip Gabani 
--- pkg/export/disk/disk.go | 13 +-- pkg/export/disk/disk_test.go | 163 +++++++++++++++++++++++++++++++++-- 2 files changed, 158 insertions(+), 18 deletions(-) diff --git a/pkg/export/disk/disk.go b/pkg/export/disk/disk.go index f62103ad2b2..0a9fbc4d00e 100644 --- a/pkg/export/disk/disk.go +++ b/pkg/export/disk/disk.go @@ -69,17 +69,17 @@ func (r *Writer) UpdateConnection(_ context.Context, connectionName string, conf path, maxResults, err := unmarshalConfig(config) if err != nil { - return fmt.Errorf("error creating connection %s: %w", connectionName, err) + return fmt.Errorf("error updating connection %s: %w", connectionName, err) } if conn.Path != path { if conn.File != nil { if err := conn.unlockAndCloseFile(); err != nil { - return fmt.Errorf("connection update failed, error closing file: %w", err) + return fmt.Errorf("error updating connection %s, error closing file: %w", connectionName, err) } } if err := os.RemoveAll(conn.Path); err != nil { - return fmt.Errorf("connection update failed, error deleting violations stored at old path: %w", err) + return fmt.Errorf("error updating connection %s, error deleting violations stored at old path: %w", connectionName, err) } conn.Path = path conn.File = nil @@ -284,13 +284,6 @@ func validatePath(path string) error { if err := os.MkdirAll(path, 0o777); err != nil { return fmt.Errorf("failed to create directory: %w", err) } - info, err := os.Stat(path) - if err != nil { - return fmt.Errorf("failed to stat path: %w", err) - } - if !info.IsDir() { - return fmt.Errorf("path is not a directory") - } return nil } diff --git a/pkg/export/disk/disk_test.go b/pkg/export/disk/disk_test.go index 911aa2375f1..05c33dd73d8 100644 --- a/pkg/export/disk/disk_test.go +++ b/pkg/export/disk/disk_test.go @@ -41,7 +41,7 @@ func TestCreateConnection(t *testing.T) { config: map[int]interface{}{ 1: "test", }, - err: fmt.Errorf("invalid config format"), + err: fmt.Errorf("error creating connection conn2: invalid config format"), expectError: 
true, }, { @@ -50,7 +50,7 @@ func TestCreateConnection(t *testing.T) { config: map[string]interface{}{ "maxAuditResults": 10.0, }, - err: fmt.Errorf("missing or invalid 'path' for connection conn3"), + err: fmt.Errorf("error creating connection conn3: missing or invalid 'path'"), expectError: true, }, { @@ -59,7 +59,7 @@ func TestCreateConnection(t *testing.T) { config: map[string]interface{}{ "path": tmpPath, }, - err: fmt.Errorf("missing or invalid 'maxAuditResults' for connection conn4"), + err: fmt.Errorf("error creating connection conn4: missing or invalid 'maxAuditResults'"), expectError: true, }, { @@ -69,7 +69,7 @@ func TestCreateConnection(t *testing.T) { "path": tmpPath, "maxAuditResults": 10.0, }, - err: fmt.Errorf("maxAuditResults cannot be greater than 5"), + err: fmt.Errorf("error creating connection conn4: maxAuditResults cannot be greater than the maximum allowed audit runs: 5"), expectError: true, }, } @@ -156,7 +156,7 @@ func TestUpdateConnection(t *testing.T) { 1: "test", }, expectError: true, - err: fmt.Errorf("invalid config format"), + err: fmt.Errorf("error updating connection conn1: invalid config format"), }, { name: "Connection not found", @@ -175,7 +175,7 @@ func TestUpdateConnection(t *testing.T) { "maxAuditResults": 2.0, }, expectError: true, - err: fmt.Errorf("missing or invalid 'path' for connection conn1"), + err: fmt.Errorf("error updating connection conn1: missing or invalid 'path'"), }, { name: "Missing maxAuditResults", @@ -184,7 +184,7 @@ func TestUpdateConnection(t *testing.T) { "path": t.TempDir(), }, expectError: true, - err: fmt.Errorf("missing or invalid 'maxAuditResults' for connection conn1"), + err: fmt.Errorf("error updating connection conn1: missing or invalid 'maxAuditResults'"), }, { name: "Exceeding maxAuditResults", @@ -194,7 +194,7 @@ func TestUpdateConnection(t *testing.T) { "maxAuditResults": 10.0, }, expectError: true, - err: fmt.Errorf("maxAuditResults cannot be greater than 5"), + err: fmt.Errorf("error 
updating connection conn1: maxAuditResults cannot be greater than the maximum allowed audit runs: 5"), }, } @@ -857,3 +857,150 @@ func TestGetFilesSortedByModTimeAsc(t *testing.T) { }) } } + +func TestValidatePath(t *testing.T) { + tests := []struct { + name string + path string + setup func(path string) error + expectError bool + expectedErr string + }{ + { + name: "Valid path", + path: t.TempDir(), + setup: nil, + expectError: false, + }, + { + name: "Empty path", + path: "", + setup: nil, + expectError: true, + expectedErr: "path cannot be empty", + }, + { + name: "Path with '..'", + path: "../invalid/path", + setup: nil, + expectError: true, + expectedErr: "path must not contain '..', dir traversal is not allowed", + }, + { + name: "Path is a file", + path: func() string { + file, err := os.CreateTemp("", "testfile") + if err != nil { + t.Fatalf("Failed to create temp file: %v", err) + } + return file.Name() + }(), + setup: nil, + expectError: true, + expectedErr: "failed to create directory", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.setup != nil { + if err := tt.setup(tt.path); err != nil { + t.Fatalf("Setup failed: %v", err) + } + } + err := validatePath(tt.path) + if (err != nil) != tt.expectError { + t.Errorf("validatePath() error = %v, expectError %v", err, tt.expectError) + } + if tt.expectError && err != nil && !strings.Contains(err.Error(), tt.expectedErr) { + t.Errorf("Expected error to contain %q, got %q", tt.expectedErr, err.Error()) + } + }) + } +} + +func TestUnmarshalConfig(t *testing.T) { + tmpPath := t.TempDir() + + tests := []struct { + name string + config interface{} + expectedPath string + expectedMax float64 + expectError bool + expectedErr string + }{ + { + name: "Valid config", + config: map[string]interface{}{ + "path": tmpPath, + "maxAuditResults": 3.0, + }, + expectedPath: tmpPath, + expectedMax: 3.0, + expectError: false, + }, + { + name: "Invalid config format", + config: 
map[int]interface{}{1: "test"}, + expectError: true, + expectedErr: "invalid config format", + }, + { + name: "Missing path", + config: map[string]interface{}{ + "maxAuditResults": 3.0, + }, + expectError: true, + expectedErr: "missing or invalid 'path'", + }, + { + name: "Invalid path", + config: map[string]interface{}{ + "path": "../invalid/path", + "maxAuditResults": 3.0, + }, + expectError: true, + expectedErr: "invalid path", + }, + { + name: "Missing maxAuditResults", + config: map[string]interface{}{ + "path": tmpPath, + }, + expectError: true, + expectedErr: "missing or invalid 'maxAuditResults'", + }, + { + name: "Exceeding maxAuditResults", + config: map[string]interface{}{ + "path": tmpPath, + "maxAuditResults": 10.0, + }, + expectError: true, + expectedErr: "maxAuditResults cannot be greater than the maximum allowed audit runs", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + path, maxResults, err := unmarshalConfig(tt.config) + if (err != nil) != tt.expectError { + t.Errorf("unmarshalConfig() error = %v, expectError %v", err, tt.expectError) + } + if tt.expectError && err != nil && !strings.Contains(err.Error(), tt.expectedErr) { + t.Errorf("Expected error to contain %q, got %q", tt.expectedErr, err.Error()) + } + if !tt.expectError { + if path != tt.expectedPath { + t.Errorf("Expected path %q, got %q", tt.expectedPath, path) + } + if maxResults != tt.expectedMax { + t.Errorf("Expected maxAuditResults %f, got %f", tt.expectedMax, maxResults) + } + } + }) + } +} + + From e4402e73500d3ddf50239548951eee7b7712b6d2 Mon Sep 17 00:00:00 2001 From: Jaydip Gabani Date: Wed, 2 Apr 2025 23:36:55 +0000 Subject: [PATCH 26/33] updating docs, adding default sidecar values in charts Signed-off-by: Jaydip Gabani --- .github/workflows/disk-export.yaml | 16 ++--- Makefile | 40 ++--------- cmd/build/helmify/main.go | 4 +- cmd/build/helmify/static/README.md | 9 +-- ...keeper-audit-violation-export-config.yaml} | 2 +- 
cmd/build/helmify/static/values.yaml | 26 ++++++++ manifest_staging/charts/gatekeeper/README.md | 9 +-- .../gatekeeper-audit-deployment.yaml | 6 +- ...keeper-audit-violation-export-config.yaml} | 2 +- .../charts/gatekeeper/values.yaml | 26 ++++++++ .../export/export_config_controller.go | 3 +- test/export/fake-reader/main.go | 6 +- website/docs/export.md | 66 ++++++++++--------- 13 files changed, 119 insertions(+), 96 deletions(-) rename cmd/build/helmify/static/templates/{gatekeeper-audit-violation-expot-config.yaml => gatekeeper-audit-violation-export-config.yaml} (76%) rename manifest_staging/charts/gatekeeper/templates/{gatekeeper-audit-violation-expot-config.yaml => gatekeeper-audit-violation-export-config.yaml} (76%) diff --git a/.github/workflows/disk-export.yaml b/.github/workflows/disk-export.yaml index b3f9b3a03ce..236c1c8806c 100644 --- a/.github/workflows/disk-export.yaml +++ b/.github/workflows/disk-export.yaml @@ -34,18 +34,14 @@ jobs: - name: Run e2e run: | - make docker-buildx \ - IMG=gatekeeper-e2e:latest - - make e2e-build-load-externaldata-image - - make e2e-reader-build-image PLATFORM="linux/amd64" - kind load docker-image --name kind \ - gatekeeper-e2e:latest fake-reader:latest + make docker-buildx IMG=gatekeeper-e2e:latest + make e2e-reader-build-image + make docker-buildx-crds CRD_IMG=gatekeeper-crds:latest + kind load docker-image --name kind gatekeeper-e2e:latest fake-reader:latest gatekeeper-crds:latest + cmdkubectl create ns gatekeeper-system - make deploy IMG=gatekeeper-e2e:latest USE_LOCAL_IMG=true GENERATE_VAP=true GENERATE_VAPBINDING=true EXPORT_BACKEND=disk + make e2e-helm-deploy HELM_REPO=gatekeeper-e2e HELM_CRD_REPO=gatekeeper-crds HELM_RELEASE=latest ENABLE_EXPORT=true LOG_LEVEL=DEBUG EXPORT_BACKEND=disk FAKE_READER_IMAGE_PULL_POLICY=Never AUDIT_CONNECTION=audit-connection AUDIT_CHANNEL=audit-channel EXPORT_DISK_PATH=/tmp/violations MAX_AUDIT_RESULTS=3 FAKE_READER_IMAGE=fake-reader:latest - kubectl apply -f 
test/export/fake-reader/export_config.yaml make test-e2e ENABLE_EXPORT_TESTS=1 - name: Save logs diff --git a/Makefile b/Makefile index 8af261ebf1d..b1d348cadd3 100644 --- a/Makefile +++ b/Makefile @@ -43,7 +43,9 @@ HELM_DAPR_EXPORT_ARGS := --set-string auditPodAnnotations.dapr\\.io/enabled=true HELM_DISK_EXPORT_ARGS := --set audit.exportVolumeMount.path=${EXPORT_DISK_PATH} \ --set audit.exportConfig.maxAuditResults=${MAX_AUDIT_RESULTS} \ - -f /tmp/values.yaml \ + --set audit.exportSidecar.image=${FAKE_READER_IMAGE} \ + --set audit.exportSidecar.imagePullPolicy=${FAKE_READER_IMAGE_PULL_POLICY} \ + HELM_EXPORT_ARGS := --set enableViolationExport=${ENABLE_EXPORT} \ --set audit.connection=${AUDIT_CONNECTION} \ @@ -139,34 +141,6 @@ MANAGER_IMAGE_PATCH := "apiVersion: apps/v1\ \n - --log-level=${LOG_LEVEL}\ \n" -MANAGER_SIDECAR_IMAGE_PATCH := "\n - --enable-violation-export=true\ -\n - --audit-connection=audit\ -\n - --audit-channel=audit\ -\n volumeMounts:\ -\n - mountPath: /tmp/violations\ -\n name: tmp-violations\ -\n - name: go-sub\ -\n image: ${FAKE_READER_IMAGE}\ -\n imagePullPolicy: ${FAKE_READER_IMAGE_PULL_POLICY}\ -\n securityContext:\ -\n allowPrivilegeEscalation: false\ -\n capabilities:\ -\n drop:\ -\n - ALL\ -\n readOnlyRootFilesystem: true\ -\n runAsGroup: 999\ -\n runAsNonRoot: true\ -\n runAsUser: 1000\ -\n seccompProfile:\ -\n type: RuntimeDefault\ -\n volumeMounts:\ -\n - mountPath: /tmp/violations\ -\n name: tmp-violations\ -\n volumes:\ -\n - emptyDir: {}\ -\n name: tmp-violations\ -\n" - HELM_EXPORT_VARIABLES := "audit:\ \n exportVolume:\ \n name: tmp-violations\ @@ -286,7 +260,7 @@ e2e-helm-install: cd .staging/helm && tar -xvf helmbin.tar.gz ./.staging/helm/linux-amd64/helm version --client -e2e-helm-deploy: e2e-helm-install $(LOCALBIN) create-values +e2e-helm-deploy: e2e-helm-install $(LOCALBIN) ifeq ($(ENABLE_EXPORT),true) ./.staging/helm/linux-amd64/helm install manifest_staging/charts/gatekeeper --name-template=gatekeeper \ --namespace 
${GATEKEEPER_NAMESPACE} \ @@ -365,9 +339,6 @@ deploy: patch-image manifests ifeq ($(ENABLE_GENERATOR_EXPANSION),true) @grep -q -v 'enable-generator-resource-expansion' ./config/overlays/dev/manager_image_patch.yaml && sed -i '/- --operation=webhook/a \ \ \ \ \ \ \ \ - --enable-generator-resource-expansion=true' ./config/overlays/dev/manager_image_patch.yaml @grep -q -v 'enable-generator-resource-expansion' ./config/overlays/dev/manager_image_patch.yaml && sed -i '/- --operation=audit/a \ \ \ \ \ \ \ \ - --enable-generator-resource-expansion=true' ./config/overlays/dev/manager_image_patch.yaml -endif -ifeq ($(EXPORT_BACKEND),disk) - @bash -c 'echo -e ${MANAGER_SIDECAR_IMAGE_PATCH} >> ./config/overlays/dev/manager_image_patch.yaml' endif docker run \ -v $(shell pwd)/config:/config \ @@ -630,6 +601,3 @@ tilt: generate manifests tilt-prepare tilt-clean: rm -rf .tiltbuild - -create-values: - @echo ${HELM_EXPORT_VARIABLES} > /tmp/values.yaml \ No newline at end of file diff --git a/cmd/build/helmify/main.go b/cmd/build/helmify/main.go index e322ba7830c..a4a637acc01 100644 --- a/cmd/build/helmify/main.go +++ b/cmd/build/helmify/main.go @@ -141,8 +141,8 @@ func (ks *kindSet) Write() error { obj = "{{- if not .Values.disableAudit }}\n" + obj + "{{- end }}\n" obj = strings.Replace(obj, " labels:", " labels:\n {{- include \"gatekeeper.podLabels\" . | nindent 8 }}\n {{- include \"audit.podLabels\" . | nindent 8 }}\n {{- include \"gatekeeper.commonLabels\" . 
| nindent 8 }}", 1) obj = strings.Replace(obj, " priorityClassName: system-cluster-critical", " {{- if .Values.audit.priorityClassName }}\n priorityClassName: {{ .Values.audit.priorityClassName }}\n {{- end }}", 1) - obj = strings.Replace(obj, " name: tmp-volume", " name: tmp-volume\n {{- if .Values.audit.exportVolumeMount }}\n - mountPath: {{ .Values.audit.exportVolumeMount.path }}\n name: {{ .Values.audit.exportVolume.name }}\n {{- end }}\n {{ if .Values.audit.exportSidecar }}\n - {{ toYaml .Values.audit.exportSidecar | nindent 8 }}\n {{- end }}", 1) - obj = strings.Replace(obj, " - emptyDir: {}", " {{- if .Values.audit.exportVolume }}\n - {{- toYaml .Values.audit.exportVolume | nindent 8 }}\n {{- end }}\n {{- if .Values.audit.writeToRAMDisk }}\n - emptyDir:\n medium: Memory\n {{ else }}\n - emptyDir: {}\n {{- end }}", 1) + obj = strings.Replace(obj, " name: tmp-volume", " name: tmp-volume\n {{- if and (.Values.enableViolationExport) (eq (.Values.exportBackend | default \"\" | lower) \"disk\") }}\n - mountPath: {{ .Values.audit.exportVolumeMount.path }}\n name: {{ .Values.audit.exportVolume.name }}\n {{- end }}\n {{ if and (.Values.enableViolationExport) (eq (.Values.exportBackend | default \"\" | lower) \"disk\") }}\n - {{ toYaml .Values.audit.exportSidecar | nindent 8 }}\n {{- end }}", 1) + obj = strings.Replace(obj, " - emptyDir: {}", " {{- if and (.Values.enableViolationExport) (eq (.Values.exportBackend | default \"\" | lower) \"disk\") }}\n - {{- toYaml .Values.audit.exportVolume | nindent 8 }}\n {{- end }}\n {{- if .Values.audit.writeToRAMDisk }}\n - emptyDir:\n medium: Memory\n {{ else }}\n - emptyDir: {}\n {{- end }}", 1) } if name == "gatekeeper-manager-role" && kind == "Role" { diff --git a/cmd/build/helmify/static/README.md b/cmd/build/helmify/static/README.md index d484f41b2f0..e198f391a23 100644 --- a/cmd/build/helmify/static/README.md +++ b/cmd/build/helmify/static/README.md @@ -224,10 +224,11 @@ information._ | enableViolationExport | (alpha) 
Enable exporting violations to external systems | `false` | | audit.connection | (alpha) Connection name for exporting audit violation messages | `audit-connection` | | audit.channel | (alpha) Channel name for exporting audit violation messages | `audit-channel` | -| audit.exportVolume | (alpha) Volume for audit pod to export violations. | nil | -| audit.exportVolumeMount.path | (alpha) VolumeMount for audit pod manager container to export violations and sidecar container to read from. | nil | -| audit.exportSidecar | (alpha) Sidecar container to read violations from disk. | nil | -| audit.exportConfig.maxAuditResults | (alpha) Maximum number of audit results that can be stored in the export path. | nil | +| audit.exportVolume | (alpha) Volume for audit pod to export violations. | `{"name":"tmp-violations","emptyDir":{}}` | +| audit.exportVolumeMount.path | (alpha) VolumeMount for audit pod manager container to export violations and sidecar container to read from. | `/tmp/violations` | +| audit.exportSidecar | (alpha) Sidecar container to read violations from disk. | `{"name":"reader","image":"openpolicyagent/fake-reader:dev","imagePullPolicy":"Always","securityContext":{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"readOnlyRootFilesystem":true,"runAsGroup":999,"runAsNonRoot":true,"runAsUser":1000,"seccompProfile":{"type":"RuntimeDefault"}},"volumeMounts":[{"mountPath":"/tmp/violations","name":"tmp-violations"}]}` | +| audit.exportConfig.maxAuditResults | (alpha) Maximum number of audit results that can be stored in the export path. | 3 | +| exportBackend | (alpha) Backend to use for exporting violations, i.e. dapr, disk. 
| "" | | replicas | The number of Gatekeeper replicas to deploy for the webhook | `3` | | podAnnotations | The annotations to add to the Gatekeeper pods | `container.seccomp.security.alpha.kubernetes.io/manager: runtime/default` | | podLabels | The labels to add to the Gatekeeper pods | `{}` | diff --git a/cmd/build/helmify/static/templates/gatekeeper-audit-violation-expot-config.yaml b/cmd/build/helmify/static/templates/gatekeeper-audit-violation-export-config.yaml similarity index 76% rename from cmd/build/helmify/static/templates/gatekeeper-audit-violation-expot-config.yaml rename to cmd/build/helmify/static/templates/gatekeeper-audit-violation-export-config.yaml index 4988172e91b..08244a8bb50 100644 --- a/cmd/build/helmify/static/templates/gatekeeper-audit-violation-expot-config.yaml +++ b/cmd/build/helmify/static/templates/gatekeeper-audit-violation-export-config.yaml @@ -1,5 +1,5 @@ --- -{{- if and .Values.enableViolationExport (eq .Values.exportBackend "disk" ) }} +{{- if and (.Values.enableViolationExport) (eq (.Values.exportBackend | default "" | lower) "disk") }} apiVersion: v1 kind: ConfigMap metadata: diff --git a/cmd/build/helmify/static/values.yaml b/cmd/build/helmify/static/values.yaml index 6ebaa3f81f1..082362e14c5 100644 --- a/cmd/build/helmify/static/values.yaml +++ b/cmd/build/helmify/static/values.yaml @@ -225,7 +225,33 @@ controllerManager: # - from: # - ipBlock: # cidr: 0.0.0.0/0 +exportBackend: "" audit: + exportConfig: + maxAuditResults: 3 + exportVolumeMount: + path: /tmp/violations + exportVolume: + name: tmp-violations + emptyDir: {} + exportSidecar: + name: reader + image: openpolicyagent/fake-reader:dev + imagePullPolicy: Always + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 999 + runAsNonRoot: true + runAsUser: 1000 + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /tmp/violations + name: tmp-violations serviceAccount: name: 
gatekeeper-admin containerName: manager diff --git a/manifest_staging/charts/gatekeeper/README.md b/manifest_staging/charts/gatekeeper/README.md index d484f41b2f0..e198f391a23 100644 --- a/manifest_staging/charts/gatekeeper/README.md +++ b/manifest_staging/charts/gatekeeper/README.md @@ -224,10 +224,11 @@ information._ | enableViolationExport | (alpha) Enable exporting violations to external systems | `false` | | audit.connection | (alpha) Connection name for exporting audit violation messages | `audit-connection` | | audit.channel | (alpha) Channel name for exporting audit violation messages | `audit-channel` | -| audit.exportVolume | (alpha) Volume for audit pod to export violations. | nil | -| audit.exportVolumeMount.path | (alpha) VolumeMount for audit pod manager container to export violations and sidecar container to read from. | nil | -| audit.exportSidecar | (alpha) Sidecar container to read violations from disk. | nil | -| audit.exportConfig.maxAuditResults | (alpha) Maximum number of audit results that can be stored in the export path. | nil | +| audit.exportVolume | (alpha) Volume for audit pod to export violations. | `{"name":"tmp-violations","emptyDir":{}}` | +| audit.exportVolumeMount.path | (alpha) VolumeMount for audit pod manager container to export violations and sidecar container to read from. | `/tmp/violations` | +| audit.exportSidecar | (alpha) Sidecar container to read violations from disk. | `{"name":"reader","image":"openpolicyagent/fake-reader:dev","imagePullPolicy":"Always","securityContext":{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"readOnlyRootFilesystem":true,"runAsGroup":999,"runAsNonRoot":true,"runAsUser":1000,"seccompProfile":{"type":"RuntimeDefault"}},"volumeMounts":[{"mountPath":"/tmp/violations","name":"tmp-violations"}]}` | +| audit.exportConfig.maxAuditResults | (alpha) Maximum number of audit results that can be stored in the export path. 
| 3 | +| exportBackend | (alpha) Backend to use for exporting violations, i.e. dapr, disk. | "" | | replicas | The number of Gatekeeper replicas to deploy for the webhook | `3` | | podAnnotations | The annotations to add to the Gatekeeper pods | `container.seccomp.security.alpha.kubernetes.io/manager: runtime/default` | | podLabels | The labels to add to the Gatekeeper pods | `{}` | diff --git a/manifest_staging/charts/gatekeeper/templates/gatekeeper-audit-deployment.yaml b/manifest_staging/charts/gatekeeper/templates/gatekeeper-audit-deployment.yaml index b35e08f5f18..6b27f618b69 100644 --- a/manifest_staging/charts/gatekeeper/templates/gatekeeper-audit-deployment.yaml +++ b/manifest_staging/charts/gatekeeper/templates/gatekeeper-audit-deployment.yaml @@ -162,11 +162,11 @@ spec: readOnly: true - mountPath: /tmp/audit name: tmp-volume - {{- if .Values.audit.exportVolumeMount }} + {{- if and (.Values.enableViolationExport) (eq (.Values.exportBackend | default "" | lower) "disk") }} - mountPath: {{ .Values.audit.exportVolumeMount.path }} name: {{ .Values.audit.exportVolume.name }} {{- end }} - {{ if .Values.audit.exportSidecar }} + {{ if and (.Values.enableViolationExport) (eq (.Values.exportBackend | default "" | lower) "disk") }} - {{ toYaml .Values.audit.exportSidecar | nindent 8 }} {{- end }} dnsPolicy: {{ .Values.audit.dnsPolicy }} @@ -189,7 +189,7 @@ spec: secret: defaultMode: 420 secretName: gatekeeper-webhook-server-cert - {{- if .Values.audit.exportVolume }} + {{- if and (.Values.enableViolationExport) (eq (.Values.exportBackend | default "" | lower) "disk") }} - {{- toYaml .Values.audit.exportVolume | nindent 8 }} {{- end }} {{- if .Values.audit.writeToRAMDisk }} diff --git a/manifest_staging/charts/gatekeeper/templates/gatekeeper-audit-violation-expot-config.yaml b/manifest_staging/charts/gatekeeper/templates/gatekeeper-audit-violation-export-config.yaml similarity index 76% rename from 
manifest_staging/charts/gatekeeper/templates/gatekeeper-audit-violation-expot-config.yaml rename to manifest_staging/charts/gatekeeper/templates/gatekeeper-audit-violation-export-config.yaml index 4988172e91b..08244a8bb50 100644 --- a/manifest_staging/charts/gatekeeper/templates/gatekeeper-audit-violation-expot-config.yaml +++ b/manifest_staging/charts/gatekeeper/templates/gatekeeper-audit-violation-export-config.yaml @@ -1,5 +1,5 @@ --- -{{- if and .Values.enableViolationExport (eq .Values.exportBackend "disk" ) }} +{{- if and (.Values.enableViolationExport) (eq (.Values.exportBackend | default "" | lower) "disk") }} apiVersion: v1 kind: ConfigMap metadata: diff --git a/manifest_staging/charts/gatekeeper/values.yaml b/manifest_staging/charts/gatekeeper/values.yaml index 6ebaa3f81f1..082362e14c5 100644 --- a/manifest_staging/charts/gatekeeper/values.yaml +++ b/manifest_staging/charts/gatekeeper/values.yaml @@ -225,7 +225,33 @@ controllerManager: # - from: # - ipBlock: # cidr: 0.0.0.0/0 +exportBackend: "" audit: + exportConfig: + maxAuditResults: 3 + exportVolumeMount: + path: /tmp/violations + exportVolume: + name: tmp-violations + emptyDir: {} + exportSidecar: + name: reader + image: openpolicyagent/fake-reader:dev + imagePullPolicy: Always + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 999 + runAsNonRoot: true + runAsUser: 1000 + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /tmp/violations + name: tmp-violations serviceAccount: name: gatekeeper-admin containerName: manager diff --git a/pkg/controller/export/export_config_controller.go b/pkg/controller/export/export_config_controller.go index 43c7cb155d5..5a5718e4870 100644 --- a/pkg/controller/export/export_config_controller.go +++ b/pkg/controller/export/export_config_controller.go @@ -5,6 +5,7 @@ import ( "encoding/json" "flag" "fmt" + "strings" "github.com/open-policy-agent/gatekeeper/v3/pkg/export" 
"github.com/open-policy-agent/gatekeeper/v3/pkg/logging" @@ -122,7 +123,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, request reconcile.Request) ( return reconcile.Result{}, err } - err = r.system.UpsertConnection(ctx, config, request.Name, cfg.Data["driver"]) + err = r.system.UpsertConnection(ctx, config, request.Name, strings.ToLower(cfg.Data["driver"])) if err != nil { return reconcile.Result{}, err } diff --git a/test/export/fake-reader/main.go b/test/export/fake-reader/main.go index 56416ad687c..02fcff1cb49 100644 --- a/test/export/fake-reader/main.go +++ b/test/export/fake-reader/main.go @@ -24,13 +24,13 @@ func main() { for { // Find the latest created file in dirPath latestFile, files, err := getLatestFile(dirPath) - log.Println("available files", files) - log.Println("reading from", latestFile) if err != nil { - log.Println("Error finding latest file", err) + log.Println("Latest file is not found, retrying in 5 seconds", err) time.Sleep(5 * time.Second) continue } + log.Println("available files", files) + log.Println("reading from", latestFile) file, err := os.OpenFile(latestFile, os.O_RDONLY, 0o644) if err != nil { log.Println("Error opening file", err) diff --git a/website/docs/export.md b/website/docs/export.md index 638285b1443..f55bb9c38e9 100644 --- a/website/docs/export.md +++ b/website/docs/export.md @@ -218,28 +218,51 @@ data: 1. 
Build `fake-reader` image from [gatekeeper/test/export/fake-reader](https://github.com/open-policy-agent/gatekeeper/tree/master/test/export/fake-reader) ```bash - docker buildx build -t --load -f test/export/fake-reader/Dockerfile test/export/fake-reader + docker buildx build -t fake-reader:latest --load -f test/export/fake-reader/Dockerfile test/export/fake-reader ``` :::tip - You can use `make e2e-reader-build-image FAKE_READER_IMAGE=` defined in [Makefile](https://github.com/open-policy-agent/gatekeeper/tree/master/Makefile) + You can use `make e2e-reader-build-image` defined in [Makefile](https://github.com/open-policy-agent/gatekeeper/tree/master/Makefile) to build image for the reader. ::: **Note:** Make sure the fake-reader image is available in your preferred registry or cluster. -2. Create `values.yaml` with the following variables. +2. Deploy Gatekeeper charts with `values.yaml`. + + ```shell + helm upgrade --install gatekeeper gatekeeper/gatekeeper --namespace gatekeeper-system \ + --set enableViolationExport=true \ + --set audit.connection=audit-connection \ + --set audit.channel=audit-channel \ + --set audit.exportConfig.maxAuditResults=3 \ + --set exportBackend=disk \ + --set audit.exportSidecar.image=fake-reader:latest \ + --set audit.exportSidecar.imagePullPolicy=IfNotPresent \ + --set audit.exportVolumeMount.path=/tmp/violations \ + ``` + + **Note**: After the audit pod starts, verify that it contains two running containers. + + ```shell + kubectl get pod -n gatekeeper-system + NAME READY STATUS RESTARTS AGE + gatekeeper-audit-6865f5f56d-vclxw 2/2 Running 0 12s + ``` + + :::tip + The command above deploys the audit pod with a default sidecar reader and volume. To customize the sidecar reader or volume according to your requirements, you can set the following variables in your values.yaml file: ```yaml audit: exportVolume: - + exportVolumeMount: - + path: exportSidecar: ``` - - Here is the default `values.yaml` that you can use. 
+ + Below are the defaults: ```yaml audit: @@ -249,8 +272,8 @@ data: exportVolumeMount: path: /tmp/violations exportSidecar: - name: go-sub - image: fake-reader:latest + name: reader + image: openpolicyagent/fake-reader:dev imagePullPolicy: Always securityContext: allowPrivilegeEscalation: false @@ -267,28 +290,9 @@ data: - mountPath: /tmp/violations name: tmp-violations ``` + ::: -3. Deploy Gatekeeper charts with `values.yaml`. - - ```shell - helm upgrade --install gatekeeper gatekeeper/gatekeeper --namespace gatekeeper-system \ - --set enableViolationExport=true \ - --set audit.connection=audit-connection \ - --set audit.channel=audit-channel \ - --set audit.exportConfig.maxAuditResults=3 \ - --set exportBackend=disk \ - --values /path/to/values.yaml - ``` - - **Note**: After the audit pod starts, verify that it contains two running containers. - - ```shell - kubectl get pod -n gatekeeper-system - NAME READY STATUS RESTARTS AGE - gatekeeper-audit-6865f5f56d-vclxw 2/2 Running 0 12s - ``` - -4. Create the constraint templates and constraints, and make sure audit ran by checking constraints. If constraint status is updated with information such as `auditTimeStamp` or `totalViolations`, then audit has ran at least once. Additionally, populated `TOTAL-VIOLATIONS` field for all constraints while listing constraints also indicates that audit has ran at least once. +3. Create the constraint templates and constraints, and make sure audit ran by checking constraints. If constraint status is updated with information such as `auditTimeStamp` or `totalViolations`, then audit has ran at least once. Additionally, populated `TOTAL-VIOLATIONS` field for all constraints while listing constraints also indicates that audit has ran at least once. ```log kubectl get constraint @@ -296,7 +300,7 @@ data: pod-must-have-test 0 ``` -5. Finally, check the sidecar reader logs to see the violations written. +4. Finally, check the sidecar reader logs to see the violations written. 
```log kubectl logs -l gatekeeper.sh/operation=audit -c go-sub -n gatekeeper-system From 567ec399dbbd2be2fd2bec171ad66b7cd5cf0edc Mon Sep 17 00:00:00 2001 From: Jaydip Gabani Date: Thu, 3 Apr 2025 00:17:08 +0000 Subject: [PATCH 27/33] updating tests Signed-off-by: Jaydip Gabani --- .github/workflows/dapr-export.yaml | 2 +- .github/workflows/disk-export.yaml | 5 +++-- Makefile | 1 - test/bats/helpers.bash | 14 ++++++++++++-- test/bats/test.bats | 2 +- 5 files changed, 17 insertions(+), 7 deletions(-) diff --git a/.github/workflows/dapr-export.yaml b/.github/workflows/dapr-export.yaml index 601657a5c35..3e916bc9191 100644 --- a/.github/workflows/dapr-export.yaml +++ b/.github/workflows/dapr-export.yaml @@ -51,7 +51,7 @@ jobs: kubectl create ns gatekeeper-system make e2e-publisher-deploy make e2e-helm-deploy HELM_REPO=gatekeeper-e2e HELM_CRD_REPO=gatekeeper-crds HELM_RELEASE=latest ENABLE_EXPORT=true LOG_LEVEL=DEBUG AUDIT_CHANNEL=audit-channel EXPORT_BACKEND=dapr - make test-e2e ENABLE_EXPORT_TESTS=1 + make test-e2e ENABLE_EXPORT_TESTS=1 EXPORT_BACKEND=dapr - name: Save logs if: ${{ always() }} diff --git a/.github/workflows/disk-export.yaml b/.github/workflows/disk-export.yaml index 236c1c8806c..ed5540d1bfd 100644 --- a/.github/workflows/disk-export.yaml +++ b/.github/workflows/disk-export.yaml @@ -35,14 +35,15 @@ jobs: - name: Run e2e run: | make docker-buildx IMG=gatekeeper-e2e:latest + make e2e-build-load-externaldata-image make e2e-reader-build-image make docker-buildx-crds CRD_IMG=gatekeeper-crds:latest kind load docker-image --name kind gatekeeper-e2e:latest fake-reader:latest gatekeeper-crds:latest - cmdkubectl create ns gatekeeper-system + kubectl create ns gatekeeper-system make e2e-helm-deploy HELM_REPO=gatekeeper-e2e HELM_CRD_REPO=gatekeeper-crds HELM_RELEASE=latest ENABLE_EXPORT=true LOG_LEVEL=DEBUG EXPORT_BACKEND=disk FAKE_READER_IMAGE_PULL_POLICY=Never AUDIT_CONNECTION=audit-connection AUDIT_CHANNEL=audit-channel EXPORT_DISK_PATH=/tmp/violations 
MAX_AUDIT_RESULTS=3 FAKE_READER_IMAGE=fake-reader:latest - make test-e2e ENABLE_EXPORT_TESTS=1 + make test-e2e ENABLE_EXPORT_TESTS=1 EXPORT_BACKEND=disk - name: Save logs if: ${{ always() }} diff --git a/Makefile b/Makefile index b1d348cadd3..7a736d7226d 100644 --- a/Makefile +++ b/Makefile @@ -46,7 +46,6 @@ HELM_DISK_EXPORT_ARGS := --set audit.exportVolumeMount.path=${EXPORT_DISK_PATH} --set audit.exportSidecar.image=${FAKE_READER_IMAGE} \ --set audit.exportSidecar.imagePullPolicy=${FAKE_READER_IMAGE_PULL_POLICY} \ - HELM_EXPORT_ARGS := --set enableViolationExport=${ENABLE_EXPORT} \ --set audit.connection=${AUDIT_CONNECTION} \ --set audit.channel=${AUDIT_CHANNEL} \ diff --git a/test/bats/helpers.bash b/test/bats/helpers.bash index e2acb9e1205..7901cb63660 100644 --- a/test/bats/helpers.bash +++ b/test/bats/helpers.bash @@ -140,8 +140,18 @@ mutator_enforced() { } total_violations() { + local backend="$1" ct_total_violations="$(kubectl get k8srequiredlabels pod-must-have-test -n gatekeeper-system -ojson | jq '.status.totalViolations')" audit_id="$(kubectl get k8srequiredlabels pod-must-have-test -n gatekeeper-system -ojson | jq '.status.auditTimestamp')" - violations="$(kubectl logs -n fake-subscriber -l app=sub -c go-sub --tail=-1 | grep $audit_id | grep violation_audited | wc -l)" - [[ "${ct_total_violations}" -eq "${violations}" ]] + + if [[ "${backend}" == "dapr" ]]; then + violations="$(kubectl logs -n fake-subscriber -l app=sub -c go-sub --tail=-1 | grep $audit_id | grep violation_audited | wc -l)" + return [[ "${ct_total_violations}" -eq "${violations}" ]] + elif [[ "${backend}" == "disk" ]]; then + violations="$(kubectl logs -n gatekeeper-system -l gatekeeper.sh/operation=audit -c reader --tail=-1 | grep $audit_id | grep violation_audited | wc -l)" + return [[ "${ct_total_violations}" -eq "${violations}" ]] + else + echo "Unknown backend: ${backend}" + return 1 + fi } diff --git a/test/bats/test.bats b/test/bats/test.bats index 8b59e85fbea..40d3a25de08 
100644 --- a/test/bats/test.bats +++ b/test/bats/test.bats @@ -675,7 +675,7 @@ __expansion_audit_test() { wait_for_process ${WAIT_TIME} ${SLEEP_TIME} "constraint_enforced k8srequiredlabels pod-must-have-test" - wait_for_process ${WAIT_TIME} ${SLEEP_TIME} "total_violations" + wait_for_process ${WAIT_TIME} ${SLEEP_TIME} "total_violations ${EXPORT_BACKEND}" run kubectl delete -f test/export/k8srequiredlabels_ct.yaml --ignore-not-found run kubectl delete -f test/export/pod_must_have_test.yaml --ignore-not-found From 481278051f69379aad10ba17dfe82aa21f392b37 Mon Sep 17 00:00:00 2001 From: Jaydip Gabani Date: Thu, 3 Apr 2025 00:24:19 +0000 Subject: [PATCH 28/33] fixing tests Signed-off-by: Jaydip Gabani --- test/bats/helpers.bash | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/test/bats/helpers.bash b/test/bats/helpers.bash index 7901cb63660..b28ee6546fa 100644 --- a/test/bats/helpers.bash +++ b/test/bats/helpers.bash @@ -143,15 +143,14 @@ total_violations() { local backend="$1" ct_total_violations="$(kubectl get k8srequiredlabels pod-must-have-test -n gatekeeper-system -ojson | jq '.status.totalViolations')" audit_id="$(kubectl get k8srequiredlabels pod-must-have-test -n gatekeeper-system -ojson | jq '.status.auditTimestamp')" - + violations="" if [[ "${backend}" == "dapr" ]]; then violations="$(kubectl logs -n fake-subscriber -l app=sub -c go-sub --tail=-1 | grep $audit_id | grep violation_audited | wc -l)" - return [[ "${ct_total_violations}" -eq "${violations}" ]] elif [[ "${backend}" == "disk" ]]; then violations="$(kubectl logs -n gatekeeper-system -l gatekeeper.sh/operation=audit -c reader --tail=-1 | grep $audit_id | grep violation_audited | wc -l)" - return [[ "${ct_total_violations}" -eq "${violations}" ]] else echo "Unknown backend: ${backend}" return 1 - fi + fi + [[ "${ct_total_violations}" -eq "${violations}" ]] } From 2aae258516f8e73dac8a2576d31938034b4e0129 Mon Sep 17 00:00:00 2001 From: Jaydip Gabani Date: Thu, 3 Apr 2025 01:02:01 
+0000 Subject: [PATCH 29/33] fixing export test ci Signed-off-by: Jaydip Gabani --- .github/workflows/disk-export.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/disk-export.yaml b/.github/workflows/disk-export.yaml index ed5540d1bfd..0a805282364 100644 --- a/.github/workflows/disk-export.yaml +++ b/.github/workflows/disk-export.yaml @@ -49,7 +49,7 @@ jobs: if: ${{ always() }} run: | kubectl logs -n gatekeeper-system -l control-plane=audit-controller -c manager --tail=-1 > logs-audit-manager.json - kubectl logs -n gatekeeper-system -l control-plane=audit-controller -c go-sub --tail=-1 > logs-audit-export.json + kubectl logs -n gatekeeper-system -l control-plane=audit-controller -c reader --tail=-1 > logs-audit-export.json - name: Upload artifacts uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0 From 304179346616a4f27410b424a3bfa9f0bcfe88aa Mon Sep 17 00:00:00 2001 From: Jaydip Gabani Date: Thu, 3 Apr 2025 18:34:38 +0000 Subject: [PATCH 30/33] updating docs to use published fake-reader image Signed-off-by: Jaydip Gabani --- cmd/build/helmify/static/README.md | 2 +- cmd/build/helmify/static/values.yaml | 2 +- manifest_staging/charts/gatekeeper/README.md | 2 +- .../charts/gatekeeper/values.yaml | 2 +- website/docs/export.md | 24 +++++-------------- 5 files changed, 10 insertions(+), 22 deletions(-) diff --git a/cmd/build/helmify/static/README.md b/cmd/build/helmify/static/README.md index e198f391a23..91554d4f81c 100644 --- a/cmd/build/helmify/static/README.md +++ b/cmd/build/helmify/static/README.md @@ -226,7 +226,7 @@ information._ | audit.channel | (alpha) Channel name for exporting audit violation messages | `audit-channel` | | audit.exportVolume | (alpha) Volume for audit pod to export violations. | `{"name":"tmp-violations","emptyDir":{}}` | | audit.exportVolumeMount.path | (alpha) VolumeMount for audit pod manager container to export violations and sidecar container to read from. 
| `/tmp/violations` | -| audit.exportSidecar | (alpha) Sidecar container to read violations from disk. | `{"name":"reader","image":"openpolicyagent/fake-reader:dev","imagePullPolicy":"Always","securityContext":{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"readOnlyRootFilesystem":true,"runAsGroup":999,"runAsNonRoot":true,"runAsUser":1000,"seccompProfile":{"type":"RuntimeDefault"}},"volumeMounts":[{"mountPath":"/tmp/violations","name":"tmp-violations"}]}` | +| audit.exportSidecar | (alpha) Sidecar container to read violations from disk. | `{"name":"reader","image":"ghcr.io/open-policy-agent/fake-reader:latest","imagePullPolicy":"Always","securityContext":{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"readOnlyRootFilesystem":true,"runAsGroup":999,"runAsNonRoot":true,"runAsUser":1000,"seccompProfile":{"type":"RuntimeDefault"}},"volumeMounts":[{"mountPath":"/tmp/violations","name":"tmp-violations"}]}` | | audit.exportConfig.maxAuditResults | (alpha) Maximum number of audit results that can be stored in the export path. | 3 | | exportBackend | (alpha) Backend to use for exporting violations, i.e. dapr, disk. 
| "" | | replicas | The number of Gatekeeper replicas to deploy for the webhook | `3` | diff --git a/cmd/build/helmify/static/values.yaml b/cmd/build/helmify/static/values.yaml index 082362e14c5..823141533b9 100644 --- a/cmd/build/helmify/static/values.yaml +++ b/cmd/build/helmify/static/values.yaml @@ -236,7 +236,7 @@ audit: emptyDir: {} exportSidecar: name: reader - image: openpolicyagent/fake-reader:dev + image: ghcr.io/open-policy-agent/fake-reader:latest imagePullPolicy: Always securityContext: allowPrivilegeEscalation: false diff --git a/manifest_staging/charts/gatekeeper/README.md b/manifest_staging/charts/gatekeeper/README.md index e198f391a23..91554d4f81c 100644 --- a/manifest_staging/charts/gatekeeper/README.md +++ b/manifest_staging/charts/gatekeeper/README.md @@ -226,7 +226,7 @@ information._ | audit.channel | (alpha) Channel name for exporting audit violation messages | `audit-channel` | | audit.exportVolume | (alpha) Volume for audit pod to export violations. | `{"name":"tmp-violations","emptyDir":{}}` | | audit.exportVolumeMount.path | (alpha) VolumeMount for audit pod manager container to export violations and sidecar container to read from. | `/tmp/violations` | -| audit.exportSidecar | (alpha) Sidecar container to read violations from disk. | `{"name":"reader","image":"openpolicyagent/fake-reader:dev","imagePullPolicy":"Always","securityContext":{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"readOnlyRootFilesystem":true,"runAsGroup":999,"runAsNonRoot":true,"runAsUser":1000,"seccompProfile":{"type":"RuntimeDefault"}},"volumeMounts":[{"mountPath":"/tmp/violations","name":"tmp-violations"}]}` | +| audit.exportSidecar | (alpha) Sidecar container to read violations from disk. 
| `{"name":"reader","image":"ghcr.io/open-policy-agent/fake-reader:latest","imagePullPolicy":"Always","securityContext":{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"readOnlyRootFilesystem":true,"runAsGroup":999,"runAsNonRoot":true,"runAsUser":1000,"seccompProfile":{"type":"RuntimeDefault"}},"volumeMounts":[{"mountPath":"/tmp/violations","name":"tmp-violations"}]}` | | audit.exportConfig.maxAuditResults | (alpha) Maximum number of audit results that can be stored in the export path. | 3 | | exportBackend | (alpha) Backend to use for exporting violations, i.e. dapr, disk. | "" | | replicas | The number of Gatekeeper replicas to deploy for the webhook | `3` | diff --git a/manifest_staging/charts/gatekeeper/values.yaml b/manifest_staging/charts/gatekeeper/values.yaml index 082362e14c5..823141533b9 100644 --- a/manifest_staging/charts/gatekeeper/values.yaml +++ b/manifest_staging/charts/gatekeeper/values.yaml @@ -236,7 +236,7 @@ audit: emptyDir: {} exportSidecar: name: reader - image: openpolicyagent/fake-reader:dev + image: ghcr.io/open-policy-agent/fake-reader:latest imagePullPolicy: Always securityContext: allowPrivilegeEscalation: false diff --git a/website/docs/export.md b/website/docs/export.md index f55bb9c38e9..5f088eb9a15 100644 --- a/website/docs/export.md +++ b/website/docs/export.md @@ -215,19 +215,7 @@ data: #### Configure Gatekeeper with Export enabled to Disk -1. Build `fake-reader` image from [gatekeeper/test/export/fake-reader](https://github.com/open-policy-agent/gatekeeper/tree/master/test/export/fake-reader) - - ```bash - docker buildx build -t fake-reader:latest --load -f test/export/fake-reader/Dockerfile test/export/fake-reader - ``` - - :::tip - You can use `make e2e-reader-build-image` defined in [Makefile](https://github.com/open-policy-agent/gatekeeper/tree/master/Makefile) to build image for the reader. - ::: - - **Note:** Make sure the fake-reader image is available in your preferred registry or cluster. - -2. 
Deploy Gatekeeper charts with `values.yaml`. +1. Deploy Gatekeeper with disk export configurations. ```shell helm upgrade --install gatekeeper gatekeeper/gatekeeper --namespace gatekeeper-system \ @@ -236,8 +224,8 @@ data: --set audit.channel=audit-channel \ --set audit.exportConfig.maxAuditResults=3 \ --set exportBackend=disk \ - --set audit.exportSidecar.image=fake-reader:latest \ - --set audit.exportSidecar.imagePullPolicy=IfNotPresent \ + --set audit.exportSidecar.image=ghcr.io/open-policy-agent/fake-reader:latest \ + --set audit.exportSidecar.imagePullPolicy=Always \ --set audit.exportVolumeMount.path=/tmp/violations \ ``` @@ -273,7 +261,7 @@ data: path: /tmp/violations exportSidecar: name: reader - image: openpolicyagent/fake-reader:dev + image: ghcr.io/open-policy-agent/fake-reader:latest imagePullPolicy: Always securityContext: allowPrivilegeEscalation: false @@ -292,7 +280,7 @@ data: ``` ::: -3. Create the constraint templates and constraints, and make sure audit ran by checking constraints. If constraint status is updated with information such as `auditTimeStamp` or `totalViolations`, then audit has ran at least once. Additionally, populated `TOTAL-VIOLATIONS` field for all constraints while listing constraints also indicates that audit has ran at least once. +2. Create the constraint templates and constraints, and make sure audit ran by checking constraints. If constraint status is updated with information such as `auditTimeStamp` or `totalViolations`, then audit has ran at least once. Additionally, populated `TOTAL-VIOLATIONS` field for all constraints while listing constraints also indicates that audit has ran at least once. ```log kubectl get constraint @@ -300,7 +288,7 @@ data: pod-must-have-test 0 ``` -4. Finally, check the sidecar reader logs to see the violations written. +3. Finally, check the sidecar reader logs to see the violations written. 
```log kubectl logs -l gatekeeper.sh/operation=audit -c go-sub -n gatekeeper-system From 19ffcd63ef538977b7b989b59f33dacc39abc77c Mon Sep 17 00:00:00 2001 From: Jaydip Gabani Date: Fri, 25 Apr 2025 23:50:37 +0000 Subject: [PATCH 31/33] updating actions to latest versions, updating docs Signed-off-by: Jaydip Gabani --- .github/workflows/disk-export.yaml | 2 +- test/export/fake-reader/Dockerfile | 4 ++-- test/export/fake-subscriber/Dockerfile | 4 ++-- website/docs/export.md | 3 --- 4 files changed, 5 insertions(+), 8 deletions(-) diff --git a/.github/workflows/disk-export.yaml b/.github/workflows/disk-export.yaml index 0a805282364..36d2e6987cf 100644 --- a/.github/workflows/disk-export.yaml +++ b/.github/workflows/disk-export.yaml @@ -19,7 +19,7 @@ jobs: timeout-minutes: 15 steps: - name: Harden Runner - uses: step-security/harden-runner@cb605e52c26070c328afc4562f0b4ada7618a84e # v2.10.4 + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 with: egress-policy: audit diff --git a/test/export/fake-reader/Dockerfile b/test/export/fake-reader/Dockerfile index 54b7845a2d2..b703ecbd7d5 100644 --- a/test/export/fake-reader/Dockerfile +++ b/test/export/fake-reader/Dockerfile @@ -1,4 +1,4 @@ -FROM --platform=$BUILDPLATFORM golang:1.23-bookworm@sha256:462f68e1109cc0415f58ba591f11e650b38e193fddc4a683a3b77d29be8bfb2c AS builder +FROM --platform=$BUILDPLATFORM golang:1.24-bookworm@sha256:00eccd446e023d3cd9566c25a6e6a02b90db3e1e0bbe26a48fc29cd96e800901 AS builder ARG TARGETPLATFORM ARG TARGETOS @@ -20,7 +20,7 @@ RUN go mod init && go mod tidy && go mod vendor RUN go build -o main -FROM gcr.io/distroless/static-debian12@sha256:8dd8d3ca2cf283383304fd45a5c9c74d5f2cd9da8d3b077d720e264880077c65 +FROM gcr.io/distroless/static-debian12@sha256:3d0f463de06b7ddff27684ec3bfd0b54a425149d0f8685308b1fdf297b0265e9 WORKDIR / diff --git a/test/export/fake-subscriber/Dockerfile b/test/export/fake-subscriber/Dockerfile index d4992b10fef..c09a00ad1df 100644 --- 
a/test/export/fake-subscriber/Dockerfile +++ b/test/export/fake-subscriber/Dockerfile @@ -1,4 +1,4 @@ -FROM --platform=$BUILDPLATFORM golang:1.24-bookworm@sha256:fa1a01d362a7b9df68b021d59a124d28cae6d99ebd1a876e3557c4dd092f1b1d AS builder +FROM --platform=$BUILDPLATFORM golang:1.24-bookworm@sha256:00eccd446e023d3cd9566c25a6e6a02b90db3e1e0bbe26a48fc29cd96e800901 AS builder ARG TARGETPLATFORM ARG TARGETOS @@ -20,7 +20,7 @@ RUN go mod init && go mod tidy && go mod vendor RUN go build -o main -FROM gcr.io/distroless/static-debian12@sha256:8dd8d3ca2cf283383304fd45a5c9c74d5f2cd9da8d3b077d720e264880077c65 +FROM gcr.io/distroless/static-debian12@sha256:3d0f463de06b7ddff27684ec3bfd0b54a425149d0f8685308b1fdf297b0265e9 WORKDIR / diff --git a/website/docs/export.md b/website/docs/export.md index 5f088eb9a15..6747acc7674 100644 --- a/website/docs/export.md +++ b/website/docs/export.md @@ -224,9 +224,6 @@ data: --set audit.channel=audit-channel \ --set audit.exportConfig.maxAuditResults=3 \ --set exportBackend=disk \ - --set audit.exportSidecar.image=ghcr.io/open-policy-agent/fake-reader:latest \ - --set audit.exportSidecar.imagePullPolicy=Always \ - --set audit.exportVolumeMount.path=/tmp/violations \ ``` **Note**: After the audit pod starts, verify that it contains two running containers. 
From ad3c19d2fb2c95c82ee87ded693a6732fef2bdbe Mon Sep 17 00:00:00 2001 From: Jaydip Gabani Date: Mon, 28 Apr 2025 21:03:42 +0000 Subject: [PATCH 32/33] addressing feedbacks Signed-off-by: Jaydip Gabani --- pkg/audit/manager.go | 2 +- pkg/export/disk/disk.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/audit/manager.go b/pkg/audit/manager.go index 17ffd3b29e4..e3ef3e28dc5 100644 --- a/pkg/audit/manager.go +++ b/pkg/audit/manager.go @@ -262,8 +262,8 @@ func (am *Manager) audit(ctx context.Context) error { exportErrorMap := make(map[string]error) if *exportController.ExportEnabled { if err := am.exportSystem.Publish(context.Background(), *auditConnection, *auditChannel, exportutil.ExportMsg{Message: exportutil.AuditStartedMsg, ID: timestamp}); err != nil { - exportErrorMap[strings.Split(err.Error(), ":")[0]] = err am.log.Error(err, "failed to export audit start message") + exportErrorMap[strings.Split(err.Error(), ":")[0]] = err } } // record audit latency diff --git a/pkg/export/disk/disk.go b/pkg/export/disk/disk.go index 0a9fbc4d00e..ce6121455f5 100644 --- a/pkg/export/disk/disk.go +++ b/pkg/export/disk/disk.go @@ -157,7 +157,7 @@ func (conn *Connection) handleAuditStart(auditID string, topic string) error { dir := path.Join(conn.Path, topic) if err := os.MkdirAll(dir, 0o777); err != nil { - return fmt.Errorf("failed to create directories:` %w", err) + return fmt.Errorf("failed to create directories: %w", err) } // Set the dir permissions to make sure reader can modify files if need be after the lock is released. 
From ab3d3109812d8270e1f247644de43297f7ba613f Mon Sep 17 00:00:00 2001 From: Jaydip Gabani Date: Mon, 5 May 2025 22:47:13 +0000 Subject: [PATCH 33/33] updating docs Signed-off-by: Jaydip Gabani --- website/docs/export.md | 62 ++++++++++++++++++++++-------------------- 1 file changed, 33 insertions(+), 29 deletions(-) diff --git a/website/docs/export.md b/website/docs/export.md index 6747acc7674..f4b93106aae 100644 --- a/website/docs/export.md +++ b/website/docs/export.md @@ -217,6 +217,39 @@ data: 1. Deploy Gatekeeper with disk export configurations. + Below are the default configurations that enable disk export and add a sidecar container to the Gatekeeper audit pod: + + ```yaml + audit: + exportVolume: + name: tmp-violations + emptyDir: {} + exportVolumeMount: + path: /tmp/violations + exportSidecar: + name: reader + image: ghcr.io/open-policy-agent/fake-reader:latest + imagePullPolicy: Always + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 999 + runAsNonRoot: true + runAsUser: 1000 + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /tmp/violations + name: tmp-violations + ``` + + :::warning + The reader sidecar image `ghcr.io/open-policy-agent/fake-reader:latest` and the default configurations above are intended for demonstration and quickstart purposes only; they are not recommended for production environments. For production use, build and configure a custom sidecar image tailored to your requirements. 
+ ::: + ```shell helm upgrade --install gatekeeper gatekeeper/gatekeeper --namespace gatekeeper-system \ --set enableViolationExport=true \ @@ -246,35 +279,6 @@ data: exportSidecar: ``` - - Below are the defaults: - - ```yaml - audit: - exportVolume: - name: tmp-violations - emptyDir: {} - exportVolumeMount: - path: /tmp/violations - exportSidecar: - name: reader - image: ghcr.io/open-policy-agent/fake-reader:latest - imagePullPolicy: Always - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 999 - runAsNonRoot: true - runAsUser: 1000 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /tmp/violations - name: tmp-violations - ``` ::: 2. Create the constraint templates and constraints, and make sure audit ran by checking constraints. If constraint status is updated with information such as `auditTimeStamp` or `totalViolations`, then audit has ran at least once. Additionally, populated `TOTAL-VIOLATIONS` field for all constraints while listing constraints also indicates that audit has ran at least once.