diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 081d0136be9..d3478b73a6a 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -63,3 +63,17 @@ updates: interval: "weekly" commit-message: prefix: "chore" + + - package-ecosystem: "docker" + directory: "/test/export/fake-subscriber" + schedule: + interval: "weekly" + commit-message: + prefix: "chore" + + - package-ecosystem: "docker" + directory: "/test/export/fake-reader" + schedule: + interval: "weekly" + commit-message: + prefix: "chore" diff --git a/.github/workflows/dapr-export.yaml b/.github/workflows/dapr-export.yaml index 554a5534c05..64aae3b2c4a 100644 --- a/.github/workflows/dapr-export.yaml +++ b/.github/workflows/dapr-export.yaml @@ -50,8 +50,8 @@ jobs: kind load docker-image --name kind gatekeeper-e2e:latest gatekeeper-crds:latest kubectl create ns gatekeeper-system make e2e-publisher-deploy - make e2e-helm-deploy HELM_REPO=gatekeeper-e2e HELM_CRD_REPO=gatekeeper-crds HELM_RELEASE=latest ENABLE_EXPORT=true LOG_LEVEL=DEBUG - make test-e2e ENABLE_EXPORT_TESTS=1 + make e2e-helm-deploy HELM_REPO=gatekeeper-e2e HELM_CRD_REPO=gatekeeper-crds HELM_RELEASE=latest ENABLE_EXPORT=true LOG_LEVEL=DEBUG AUDIT_CHANNEL=audit-channel EXPORT_BACKEND=dapr + make test-e2e ENABLE_EXPORT_TESTS=1 EXPORT_BACKEND=dapr - name: Save logs if: ${{ always() }} diff --git a/.github/workflows/disk-export.yaml b/.github/workflows/disk-export.yaml new file mode 100644 index 00000000000..36d2e6987cf --- /dev/null +++ b/.github/workflows/disk-export.yaml @@ -0,0 +1,61 @@ +name: disk-export +on: + push: + paths: + - "pkg/export/dapr" + - "pkg/export/disk" + - "test/export/**" + pull_request: + paths: + - "pkg/export/dapr" + - "pkg/export/disk" + - "test/export/**" +permissions: read-all + +jobs: + disk_test: + name: "Disk export test" + runs-on: ubuntu-22.04 + timeout-minutes: 15 + steps: + - name: Harden Runner + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + 
egress-policy: audit + + - name: Check out code into the Go module directory + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Bootstrap e2e + run: | + mkdir -p $GITHUB_WORKSPACE/bin + echo "$GITHUB_WORKSPACE/bin" >> $GITHUB_PATH + make e2e-bootstrap + + - name: Run e2e + run: | + make docker-buildx IMG=gatekeeper-e2e:latest + make e2e-build-load-externaldata-image + make e2e-reader-build-image + make docker-buildx-crds CRD_IMG=gatekeeper-crds:latest + kind load docker-image --name kind gatekeeper-e2e:latest fake-reader:latest gatekeeper-crds:latest + kubectl create ns gatekeeper-system + + make e2e-helm-deploy HELM_REPO=gatekeeper-e2e HELM_CRD_REPO=gatekeeper-crds HELM_RELEASE=latest ENABLE_EXPORT=true LOG_LEVEL=DEBUG EXPORT_BACKEND=disk FAKE_READER_IMAGE_PULL_POLICY=Never AUDIT_CONNECTION=audit-connection AUDIT_CHANNEL=audit-channel EXPORT_DISK_PATH=/tmp/violations MAX_AUDIT_RESULTS=3 FAKE_READER_IMAGE=fake-reader:latest + + make test-e2e ENABLE_EXPORT_TESTS=1 EXPORT_BACKEND=disk + + - name: Save logs + if: ${{ always() }} + run: | + kubectl logs -n gatekeeper-system -l control-plane=audit-controller -c manager --tail=-1 > logs-audit-manager.json + kubectl logs -n gatekeeper-system -l control-plane=audit-controller -c reader --tail=-1 > logs-audit-export.json + + - name: Upload artifacts + uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0 + if: ${{ always() }} + with: + name: export-logs + path: | + logs-*.json + diff --git a/Makefile b/Makefile index 94f2b0a041d..3d06c46b3d7 100644 --- a/Makefile +++ b/Makefile @@ -37,6 +37,38 @@ NODE_VERSION ?= 16-bullseye-slim YQ_VERSION ?= 4.30.6 HELM_ARGS ?= +HELM_DAPR_EXPORT_ARGS := --set-string auditPodAnnotations.dapr\\.io/enabled=true \ + --set-string auditPodAnnotations.dapr\\.io/app-id=audit \ + --set-string auditPodAnnotations.dapr\\.io/metrics-port=9999 \ + +HELM_DISK_EXPORT_ARGS := --set audit.exportVolumeMount.path=${EXPORT_DISK_PATH} \ + --set 
audit.exportConfig.maxAuditResults=${MAX_AUDIT_RESULTS} \ + --set audit.exportSidecar.image=${FAKE_READER_IMAGE} \ + --set audit.exportSidecar.imagePullPolicy=${FAKE_READER_IMAGE_PULL_POLICY} \ + +HELM_EXPORT_ARGS := --set enableViolationExport=${ENABLE_EXPORT} \ + --set audit.connection=${AUDIT_CONNECTION} \ + --set audit.channel=${AUDIT_CHANNEL} \ + --set exportBackend=${EXPORT_BACKEND} \ + +HELM_EXTRA_ARGS := --set image.repository=${HELM_REPO} \ + --set image.crdRepository=${HELM_CRD_REPO} \ + --set image.release=${HELM_RELEASE} \ + --set postInstall.labelNamespace.image.repository=${HELM_CRD_REPO} \ + --set postInstall.labelNamespace.image.tag=${HELM_RELEASE} \ + --set postInstall.labelNamespace.enabled=true \ + --set postInstall.probeWebhook.enabled=true \ + --set emitAdmissionEvents=true \ + --set emitAuditEvents=true \ + --set admissionEventsInvolvedNamespace=true \ + --set auditEventsInvolvedNamespace=true \ + --set disabledBuiltins={http.send} \ + --set logMutations=true \ + --set logLevel=${LOG_LEVEL} \ + --set defaultCreateVAPForTemplates=${GENERATE_VAP} \ + --set defaultCreateVAPBindingForConstraints=${GENERATE_VAPBINDING} \ + --set mutationAnnotations=true;\ + GATEKEEPER_NAMESPACE ?= gatekeeper-system # When updating this, make sure to update the corresponding action in @@ -48,6 +80,8 @@ GOLANGCI_LINT_CACHE := $(shell pwd)/.tmp/golangci-lint BENCHMARK_FILE_NAME ?= benchmarks.txt FAKE_SUBSCRIBER_IMAGE ?= fake-subscriber:latest +FAKE_READER_IMAGE ?= fake-reader:latest +FAKE_READER_IMAGE_PULL_POLICY ?= IfNotPresent ROOT_DIR := $(shell dirname $(realpath $(firstword $(MAKEFILE_LIST)))) BIN_DIR := $(abspath $(ROOT_DIR)/bin) @@ -106,6 +140,29 @@ MANAGER_IMAGE_PATCH := "apiVersion: apps/v1\ \n - --log-level=${LOG_LEVEL}\ \n" +HELM_EXPORT_VARIABLES := "audit:\ +\n exportVolume:\ +\n name: tmp-violations\ +\n emptyDir: {}\ +\n exportSidecar:\ +\n name: go-sub\ +\n image: ${FAKE_READER_IMAGE}\ +\n imagePullPolicy: ${FAKE_READER_IMAGE_PULL_POLICY}\ +\n 
securityContext:\ +\n allowPrivilegeEscalation: false\ +\n capabilities:\ +\n drop:\ +\n - ALL\ +\n readOnlyRootFilesystem: true\ +\n runAsGroup: 999\ +\n runAsNonRoot: true\ +\n runAsUser: 1000\ +\n seccompProfile:\ +\n type: RuntimeDefault\ +\n volumeMounts:\ +\n - mountPath: /tmp/violations\ +\n name: tmp-violations" + # Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) ifeq (,$(shell go env GOBIN)) GOBIN=$(shell go env GOPATH)/bin @@ -202,53 +259,20 @@ e2e-helm-install: cd .staging/helm && tar -xvf helmbin.tar.gz ./.staging/helm/linux-amd64/helm version --client -e2e-helm-deploy: e2e-helm-install +e2e-helm-deploy: e2e-helm-install $(LOCALBIN) ifeq ($(ENABLE_EXPORT),true) ./.staging/helm/linux-amd64/helm install manifest_staging/charts/gatekeeper --name-template=gatekeeper \ --namespace ${GATEKEEPER_NAMESPACE} \ --debug --wait \ - --set image.repository=${HELM_REPO} \ - --set image.crdRepository=${HELM_CRD_REPO} \ - --set image.release=${HELM_RELEASE} \ - --set postInstall.labelNamespace.image.repository=${HELM_CRD_REPO} \ - --set postInstall.labelNamespace.image.tag=${HELM_RELEASE} \ - --set postInstall.labelNamespace.enabled=true \ - --set postInstall.probeWebhook.enabled=true \ - --set emitAdmissionEvents=true \ - --set emitAuditEvents=true \ - --set admissionEventsInvolvedNamespace=true \ - --set auditEventsInvolvedNamespace=true \ - --set disabledBuiltins={http.send} \ - --set logMutations=true \ - --set enableViolationExport=${ENABLE_EXPORT} \ - --set audit.connection=${AUDIT_CONNECTION} \ - --set audit.channel=${AUDIT_CHANNEL} \ - --set-string auditPodAnnotations.dapr\\.io/enabled=true \ - --set-string auditPodAnnotations.dapr\\.io/app-id=audit \ - --set-string auditPodAnnotations.dapr\\.io/metrics-port=9999 \ - --set logLevel=${LOG_LEVEL} \ - --set mutationAnnotations=true; + $(HELM_EXPORT_ARGS) \ + $(if $(filter disk,$(EXPORT_BACKEND)),$(HELM_DISK_EXPORT_ARGS)) \ + $(if $(filter 
dapr,$(EXPORT_BACKEND)),$(HELM_DAPR_EXPORT_ARGS)) \ + $(HELM_EXTRA_ARGS) else ./.staging/helm/linux-amd64/helm install manifest_staging/charts/gatekeeper --name-template=gatekeeper \ --namespace ${GATEKEEPER_NAMESPACE} --create-namespace \ --debug --wait \ - --set image.repository=${HELM_REPO} \ - --set image.crdRepository=${HELM_CRD_REPO} \ - --set image.release=${HELM_RELEASE} \ - --set postInstall.labelNamespace.image.repository=${HELM_CRD_REPO} \ - --set postInstall.labelNamespace.image.tag=${HELM_RELEASE} \ - --set postInstall.labelNamespace.enabled=true \ - --set postInstall.probeWebhook.enabled=true \ - --set emitAdmissionEvents=true \ - --set emitAuditEvents=true \ - --set admissionEventsInvolvedNamespace=true \ - --set auditEventsInvolvedNamespace=true \ - --set disabledBuiltins={http.send} \ - --set logMutations=true \ - --set logLevel=${LOG_LEVEL} \ - --set defaultCreateVAPForTemplates=${GENERATE_VAP} \ - --set defaultCreateVAPBindingForConstraints=${GENERATE_VAPBINDING} \ - --set mutationAnnotations=true; + $(HELM_EXTRA_ARGS) endif e2e-helm-upgrade-init: e2e-helm-install @@ -273,23 +297,7 @@ e2e-helm-upgrade: ./.staging/helm/linux-amd64/helm upgrade gatekeeper manifest_staging/charts/gatekeeper \ --namespace ${GATEKEEPER_NAMESPACE} \ --debug --wait \ - --set image.repository=${HELM_REPO} \ - --set image.crdRepository=${HELM_CRD_REPO} \ - --set image.release=${HELM_RELEASE} \ - --set postInstall.labelNamespace.image.repository=${HELM_CRD_REPO} \ - --set postInstall.labelNamespace.image.tag=${HELM_RELEASE} \ - --set postInstall.labelNamespace.enabled=true \ - --set postInstall.probeWebhook.enabled=true \ - --set emitAdmissionEvents=true \ - --set emitAuditEvents=true \ - --set admissionEventsInvolvedNamespace=true \ - --set auditEventsInvolvedNamespace=true \ - --set disabledBuiltins={http.send} \ - --set logMutations=true \ - --set logLevel=${LOG_LEVEL} \ - --set defaultCreateVAPForTemplates=${GENERATE_VAP} \ - --set 
defaultCreateVAPBindingForConstraints=${GENERATE_VAPBINDING} \ - --set mutationAnnotations=true;\ + $(HELM_EXTRA_ARGS) e2e-subscriber-build-load-image: docker buildx build --platform="linux/amd64" -t ${FAKE_SUBSCRIBER_IMAGE} --load -f test/export/fake-subscriber/Dockerfile test/export/fake-subscriber @@ -302,7 +310,10 @@ e2e-subscriber-deploy: e2e-publisher-deploy: kubectl get secret redis --namespace=default -o yaml | sed 's/namespace: .*/namespace: gatekeeper-system/' | kubectl apply -f - - kubectl apply -f test/export/publish-components.yaml + kubectl apply -f test/export/fake-subscriber/manifest/publish-components.yaml + +e2e-reader-build-image: + docker buildx build --platform="$(PLATFORM)" -t ${FAKE_READER_IMAGE} --load -f test/export/fake-reader/Dockerfile test/export/fake-reader # Build manager binary manager: generate diff --git a/cmd/build/helmify/main.go b/cmd/build/helmify/main.go index 074c09d5127..a4a637acc01 100644 --- a/cmd/build/helmify/main.go +++ b/cmd/build/helmify/main.go @@ -141,7 +141,8 @@ func (ks *kindSet) Write() error { obj = "{{- if not .Values.disableAudit }}\n" + obj + "{{- end }}\n" obj = strings.Replace(obj, " labels:", " labels:\n {{- include \"gatekeeper.podLabels\" . | nindent 8 }}\n {{- include \"audit.podLabels\" . | nindent 8 }}\n {{- include \"gatekeeper.commonLabels\" . 
| nindent 8 }}", 1) obj = strings.Replace(obj, " priorityClassName: system-cluster-critical", " {{- if .Values.audit.priorityClassName }}\n priorityClassName: {{ .Values.audit.priorityClassName }}\n {{- end }}", 1) - obj = strings.Replace(obj, " - emptyDir: {}", " {{- if .Values.audit.writeToRAMDisk }}\n - emptyDir:\n medium: Memory\n {{ else }}\n - emptyDir: {}\n {{- end }}", 1) + obj = strings.Replace(obj, " name: tmp-volume", " name: tmp-volume\n {{- if and (.Values.enableViolationExport) (eq (.Values.exportBackend | default \"\" | lower) \"disk\") }}\n - mountPath: {{ .Values.audit.exportVolumeMount.path }}\n name: {{ .Values.audit.exportVolume.name }}\n {{- end }}\n {{ if and (.Values.enableViolationExport) (eq (.Values.exportBackend | default \"\" | lower) \"disk\") }}\n - {{ toYaml .Values.audit.exportSidecar | nindent 8 }}\n {{- end }}", 1) + obj = strings.Replace(obj, " - emptyDir: {}", " {{- if and (.Values.enableViolationExport) (eq (.Values.exportBackend | default \"\" | lower) \"disk\") }}\n - {{- toYaml .Values.audit.exportVolume | nindent 8 }}\n {{- end }}\n {{- if .Values.audit.writeToRAMDisk }}\n - emptyDir:\n medium: Memory\n {{ else }}\n - emptyDir: {}\n {{- end }}", 1) } if name == "gatekeeper-manager-role" && kind == "Role" { diff --git a/cmd/build/helmify/static/README.md b/cmd/build/helmify/static/README.md index dd2fe03a235..9e54a80d25d 100644 --- a/cmd/build/helmify/static/README.md +++ b/cmd/build/helmify/static/README.md @@ -223,9 +223,14 @@ information._ | audit.readinessTimeout | Timeout in seconds for audit's readiness probe | `1` | | audit.livenessTimeout | Timeout in seconds for the audit's liveness probe | `1` | | audit.logLevel | The minimum log level for audit, takes precedence over `logLevel` when specified | `null` | -| enableViolationExport | (alpha) Enable exporting violations to external systems | `false` | +| enableViolationExport | (alpha) Enable exporting violations to external systems | `false` | | audit.connection | 
(alpha) Connection name for exporting audit violation messages | `audit-connection` | | audit.channel | (alpha) Channel name for exporting audit violation messages | `audit-channel` | +| audit.exportVolume | (alpha) Volume for audit pod to export violations. | `{"name":"tmp-violations","emptyDir":{}}` | +| audit.exportVolumeMount.path | (alpha) VolumeMount for audit pod manager container to export violations and sidecar container to read from. | `/tmp/violations` | +| audit.exportSidecar | (alpha) Sidecar container to read violations from disk. | `{"name":"reader","image":"ghcr.io/open-policy-agent/fake-reader:latest","imagePullPolicy":"Always","securityContext":{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"readOnlyRootFilesystem":true,"runAsGroup":999,"runAsNonRoot":true,"runAsUser":1000,"seccompProfile":{"type":"RuntimeDefault"}},"volumeMounts":[{"mountPath":"/tmp/violations","name":"tmp-violations"}]}` | +| audit.exportConfig.maxAuditResults | (alpha) Maximum number of audit results that can be stored in the export path. | 3 | +| exportBackend | (alpha) Backend to use for exporting violations, i.e. dapr, disk. 
| "" | | replicas | The number of Gatekeeper replicas to deploy for the webhook | `3` | | podAnnotations | The annotations to add to the Gatekeeper pods | `container.seccomp.security.alpha.kubernetes.io/manager: runtime/default` | | podLabels | The labels to add to the Gatekeeper pods | `{}` | diff --git a/cmd/build/helmify/static/templates/gatekeeper-audit-violation-export-config.yaml b/cmd/build/helmify/static/templates/gatekeeper-audit-violation-export-config.yaml new file mode 100644 index 00000000000..08244a8bb50 --- /dev/null +++ b/cmd/build/helmify/static/templates/gatekeeper-audit-violation-export-config.yaml @@ -0,0 +1,15 @@ +--- +{{- if and (.Values.enableViolationExport) (eq (.Values.exportBackend | default "" | lower) "disk") }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: '{{ .Values.audit.connection }}' + namespace: '{{ .Release.Namespace }}' +data: + driver: '{{ .Values.exportBackend }}' + config: | + { + "path": "{{ .Values.audit.exportVolumeMount.path }}", + "maxAuditResults": {{ .Values.audit.exportConfig.maxAuditResults }} + } +{{- end }} diff --git a/cmd/build/helmify/static/values.yaml b/cmd/build/helmify/static/values.yaml index 89ff2ef86d4..40e76911964 100644 --- a/cmd/build/helmify/static/values.yaml +++ b/cmd/build/helmify/static/values.yaml @@ -227,7 +227,33 @@ controllerManager: # - from: # - ipBlock: # cidr: 0.0.0.0/0 +exportBackend: "" audit: + exportConfig: + maxAuditResults: 3 + exportVolumeMount: + path: /tmp/violations + exportVolume: + name: tmp-violations + emptyDir: {} + exportSidecar: + name: reader + image: ghcr.io/open-policy-agent/fake-reader:latest + imagePullPolicy: Always + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 999 + runAsNonRoot: true + runAsUser: 1000 + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /tmp/violations + name: tmp-violations serviceAccount: name: gatekeeper-admin containerName: manager diff 
--git a/manifest_staging/charts/gatekeeper/README.md b/manifest_staging/charts/gatekeeper/README.md index dd2fe03a235..9e54a80d25d 100644 --- a/manifest_staging/charts/gatekeeper/README.md +++ b/manifest_staging/charts/gatekeeper/README.md @@ -223,9 +223,14 @@ information._ | audit.readinessTimeout | Timeout in seconds for audit's readiness probe | `1` | | audit.livenessTimeout | Timeout in seconds for the audit's liveness probe | `1` | | audit.logLevel | The minimum log level for audit, takes precedence over `logLevel` when specified | `null` | -| enableViolationExport | (alpha) Enable exporting violations to external systems | `false` | +| enableViolationExport | (alpha) Enable exporting violations to external systems | `false` | | audit.connection | (alpha) Connection name for exporting audit violation messages | `audit-connection` | | audit.channel | (alpha) Channel name for exporting audit violation messages | `audit-channel` | +| audit.exportVolume | (alpha) Volume for audit pod to export violations. | `{"name":"tmp-violations","emptyDir":{}}` | +| audit.exportVolumeMount.path | (alpha) VolumeMount for audit pod manager container to export violations and sidecar container to read from. | `/tmp/violations` | +| audit.exportSidecar | (alpha) Sidecar container to read violations from disk. | `{"name":"reader","image":"ghcr.io/open-policy-agent/fake-reader:latest","imagePullPolicy":"Always","securityContext":{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"readOnlyRootFilesystem":true,"runAsGroup":999,"runAsNonRoot":true,"runAsUser":1000,"seccompProfile":{"type":"RuntimeDefault"}},"volumeMounts":[{"mountPath":"/tmp/violations","name":"tmp-violations"}]}` | +| audit.exportConfig.maxAuditResults | (alpha) Maximum number of audit results that can be stored in the export path. | 3 | +| exportBackend | (alpha) Backend to use for exporting violations, i.e. dapr, disk. 
| "" | | replicas | The number of Gatekeeper replicas to deploy for the webhook | `3` | | podAnnotations | The annotations to add to the Gatekeeper pods | `container.seccomp.security.alpha.kubernetes.io/manager: runtime/default` | | podLabels | The labels to add to the Gatekeeper pods | `{}` | diff --git a/manifest_staging/charts/gatekeeper/templates/gatekeeper-audit-deployment.yaml b/manifest_staging/charts/gatekeeper/templates/gatekeeper-audit-deployment.yaml index 552beb1fa7a..6b27f618b69 100644 --- a/manifest_staging/charts/gatekeeper/templates/gatekeeper-audit-deployment.yaml +++ b/manifest_staging/charts/gatekeeper/templates/gatekeeper-audit-deployment.yaml @@ -162,6 +162,13 @@ spec: readOnly: true - mountPath: /tmp/audit name: tmp-volume + {{- if and (.Values.enableViolationExport) (eq (.Values.exportBackend | default "" | lower) "disk") }} + - mountPath: {{ .Values.audit.exportVolumeMount.path }} + name: {{ .Values.audit.exportVolume.name }} + {{- end }} + {{ if and (.Values.enableViolationExport) (eq (.Values.exportBackend | default "" | lower) "disk") }} + - {{ toYaml .Values.audit.exportSidecar | nindent 8 }} + {{- end }} dnsPolicy: {{ .Values.audit.dnsPolicy }} hostNetwork: {{ .Values.audit.hostNetwork }} imagePullSecrets: @@ -182,6 +189,9 @@ spec: secret: defaultMode: 420 secretName: gatekeeper-webhook-server-cert + {{- if and (.Values.enableViolationExport) (eq (.Values.exportBackend | default "" | lower) "disk") }} + - {{- toYaml .Values.audit.exportVolume | nindent 8 }} + {{- end }} {{- if .Values.audit.writeToRAMDisk }} - emptyDir: medium: Memory diff --git a/manifest_staging/charts/gatekeeper/templates/gatekeeper-audit-violation-export-config.yaml b/manifest_staging/charts/gatekeeper/templates/gatekeeper-audit-violation-export-config.yaml new file mode 100644 index 00000000000..08244a8bb50 --- /dev/null +++ b/manifest_staging/charts/gatekeeper/templates/gatekeeper-audit-violation-export-config.yaml @@ -0,0 +1,15 @@ +--- +{{- if and 
(.Values.enableViolationExport) (eq (.Values.exportBackend | default "" | lower) "disk") }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: '{{ .Values.audit.connection }}' + namespace: '{{ .Release.Namespace }}' +data: + driver: '{{ .Values.exportBackend }}' + config: | + { + "path": "{{ .Values.audit.exportVolumeMount.path }}", + "maxAuditResults": {{ .Values.audit.exportConfig.maxAuditResults }} + } +{{- end }} diff --git a/manifest_staging/charts/gatekeeper/values.yaml b/manifest_staging/charts/gatekeeper/values.yaml index 89ff2ef86d4..40e76911964 100644 --- a/manifest_staging/charts/gatekeeper/values.yaml +++ b/manifest_staging/charts/gatekeeper/values.yaml @@ -227,7 +227,33 @@ controllerManager: # - from: # - ipBlock: # cidr: 0.0.0.0/0 +exportBackend: "" audit: + exportConfig: + maxAuditResults: 3 + exportVolumeMount: + path: /tmp/violations + exportVolume: + name: tmp-violations + emptyDir: {} + exportSidecar: + name: reader + image: ghcr.io/open-policy-agent/fake-reader:latest + imagePullPolicy: Always + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 999 + runAsNonRoot: true + runAsUser: 1000 + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /tmp/violations + name: tmp-violations serviceAccount: name: gatekeeper-admin containerName: manager diff --git a/pkg/audit/manager.go b/pkg/audit/manager.go index d51b8bc73d0..e3ef3e28dc5 100644 --- a/pkg/audit/manager.go +++ b/pkg/audit/manager.go @@ -21,6 +21,7 @@ import ( exportController "github.com/open-policy-agent/gatekeeper/v3/pkg/controller/export" "github.com/open-policy-agent/gatekeeper/v3/pkg/expansion" "github.com/open-policy-agent/gatekeeper/v3/pkg/export" + exportutil "github.com/open-policy-agent/gatekeeper/v3/pkg/export/util" "github.com/open-policy-agent/gatekeeper/v3/pkg/logging" mutationtypes "github.com/open-policy-agent/gatekeeper/v3/pkg/mutation/types" 
"github.com/open-policy-agent/gatekeeper/v3/pkg/target" @@ -106,28 +107,6 @@ type StatusViolation struct { EnforcementActions []string `json:"enforcementActions,omitempty"` } -// ExportMsg represents export message for each violation. -type ExportMsg struct { - ID string `json:"id,omitempty"` - Details interface{} `json:"details,omitempty"` - EventType string `json:"eventType,omitempty"` - Group string `json:"group,omitempty"` - Version string `json:"version,omitempty"` - Kind string `json:"kind,omitempty"` - Name string `json:"name,omitempty"` - Namespace string `json:"namespace,omitempty"` - Message string `json:"message,omitempty"` - EnforcementAction string `json:"enforcementAction,omitempty"` - EnforcementActions []string `json:"enforcementActions,omitempty"` - ConstraintAnnotations map[string]string `json:"constraintAnnotations,omitempty"` - ResourceGroup string `json:"resourceGroup,omitempty"` - ResourceAPIVersion string `json:"resourceAPIVersion,omitempty"` - ResourceKind string `json:"resourceKind,omitempty"` - ResourceNamespace string `json:"resourceNamespace,omitempty"` - ResourceName string `json:"resourceName,omitempty"` - ResourceLabels map[string]string `json:"resourceLabels,omitempty"` -} - // A max PriorityQueue implements heap.Interface and holds StatusViolation. 
type SVQueue []*StatusViolation @@ -280,17 +259,32 @@ func (am *Manager) audit(ctx context.Context) error { timestamp := startTime.UTC().Format(time.RFC3339) am.log = log.WithValues(logging.AuditID, timestamp) logStart(am.log) + exportErrorMap := make(map[string]error) + if *exportController.ExportEnabled { + if err := am.exportSystem.Publish(context.Background(), *auditConnection, *auditChannel, exportutil.ExportMsg{Message: exportutil.AuditStartedMsg, ID: timestamp}); err != nil { + am.log.Error(err, "failed to export audit start message") + exportErrorMap[strings.Split(err.Error(), ":")[0]] = err + } + } // record audit latency defer func() { - logFinish(am.log) endTime := time.Now() latency := endTime.Sub(startTime) + logFinish(am.log, latency) if err := am.reporter.reportLatency(latency); err != nil { am.log.Error(err, "failed to report latency") } if err := am.reporter.reportRunEnd(endTime); err != nil { am.log.Error(err, "failed to report run end time") } + if *exportController.ExportEnabled { + if err := am.exportSystem.Publish(context.Background(), *auditConnection, *auditChannel, exportutil.ExportMsg{Message: exportutil.AuditCompletedMsg, ID: timestamp}); err != nil { + exportErrorMap[strings.Split(err.Error(), ":")[0]] = err + } + } + for _, v := range exportErrorMap { + am.log.Error(v, "failed to export audit violation") + } }() if err := am.reporter.reportRunStart(startTime); err != nil { @@ -334,10 +328,10 @@ func (am *Manager) audit(ctx context.Context) error { am.log.Error(err, "Auditing") } - am.addAuditResponsesToUpdateLists(updateLists, res, totalViolationsPerConstraint, totalViolationsPerEnforcementAction, timestamp) + am.addAuditResponsesToUpdateLists(updateLists, res, totalViolationsPerConstraint, totalViolationsPerEnforcementAction, timestamp, exportErrorMap) } else { am.log.Info("Auditing via discovery client") - err := am.auditResources(ctx, constraintsGVKs, updateLists, totalViolationsPerConstraint, totalViolationsPerEnforcementAction, 
timestamp) + err := am.auditResources(ctx, constraintsGVKs, updateLists, totalViolationsPerConstraint, totalViolationsPerEnforcementAction, timestamp, exportErrorMap) if err != nil { return err } @@ -371,6 +365,7 @@ func (am *Manager) auditResources( totalViolationsPerConstraint map[util.KindVersionName]int64, totalViolationsPerEnforcementAction map[util.EnforcementAction]int64, timestamp string, + exportErrorMap map[string]error, ) error { // delete all from cache dir before starting audit err := am.removeAllFromDir(*apiCacheDir, *auditChunkSize) @@ -558,7 +553,7 @@ func (am *Manager) auditResources( } // Loop through all subDirs to review all files for this kind. am.log.V(logging.DebugLevel).Info("Reviewing objects for GVK", "group", gv.Group, "version", gv.Version, "kind", kind) - err = am.reviewObjects(ctx, kind, folderCount, namespaceCache, updateLists, totalViolationsPerConstraint, totalViolationsPerEnforcementAction, timestamp) + err = am.reviewObjects(ctx, kind, folderCount, namespaceCache, updateLists, totalViolationsPerConstraint, totalViolationsPerEnforcementAction, timestamp, exportErrorMap) if err != nil { errs = append(errs, err) continue @@ -660,6 +655,7 @@ func (am *Manager) reviewObjects(ctx context.Context, kind string, folderCount i totalViolationsPerConstraint map[util.KindVersionName]int64, totalViolationsPerEnforcementAction map[util.EnforcementAction]int64, timestamp string, + exportErrorMap map[string]error, ) error { for i := 0; i < folderCount; i++ { // cache directory structure: @@ -744,7 +740,7 @@ func (am *Manager) reviewObjects(ctx context.Context, kind string, folderCount i if len(resp.Results()) > 0 { results := ToResults(&augmentedObj.Object, resp) - am.addAuditResponsesToUpdateLists(updateLists, results, totalViolationsPerConstraint, totalViolationsPerEnforcementAction, timestamp) + am.addAuditResponsesToUpdateLists(updateLists, results, totalViolationsPerConstraint, totalViolationsPerEnforcementAction, timestamp, exportErrorMap) } 
} } @@ -864,6 +860,7 @@ func (am *Manager) addAuditResponsesToUpdateLists( totalViolationsPerConstraint map[util.KindVersionName]int64, totalViolationsPerEnforcementAction map[util.EnforcementAction]int64, timestamp string, + exportErrorMap map[string]error, ) { for _, r := range res { constraint := r.Constraint @@ -905,7 +902,7 @@ func (am *Manager) addAuditResponsesToUpdateLists( if *exportController.ExportEnabled { err := am.exportSystem.Publish(context.Background(), *auditConnection, *auditChannel, violationMsg(constraint, ea, r.ScopedEnforcementActions, gvk, namespace, name, msg, details, labels, timestamp)) if err != nil { - am.log.Error(err, "error exporting audit violation") + exportErrorMap[strings.Split(err.Error(), ":")[0]] = err } } if *emitAuditEvents { @@ -1136,10 +1133,11 @@ func logStart(l logr.Logger) { ) } -func logFinish(l logr.Logger) { +func logFinish(l logr.Logger, t time.Duration) { l.Info( "auditing is complete", logging.EventType, "audit_finished", + "duration", t.String(), ) } @@ -1162,7 +1160,7 @@ func violationMsg(constraint *unstructured.Unstructured, enforcementAction util. 
userConstraintAnnotations := constraint.GetAnnotations() delete(userConstraintAnnotations, "kubectl.kubernetes.io/last-applied-configuration") - return ExportMsg{ + return exportutil.ExportMsg{ Message: message, Details: details, ID: timestamp, diff --git a/pkg/controller/export/export_config_controller.go b/pkg/controller/export/export_config_controller.go index 43c7cb155d5..5a5718e4870 100644 --- a/pkg/controller/export/export_config_controller.go +++ b/pkg/controller/export/export_config_controller.go @@ -5,6 +5,7 @@ import ( "encoding/json" "flag" "fmt" + "strings" "github.com/open-policy-agent/gatekeeper/v3/pkg/export" "github.com/open-policy-agent/gatekeeper/v3/pkg/logging" @@ -122,7 +123,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, request reconcile.Request) ( return reconcile.Result{}, err } - err = r.system.UpsertConnection(ctx, config, request.Name, cfg.Data["driver"]) + err = r.system.UpsertConnection(ctx, config, request.Name, strings.ToLower(cfg.Data["driver"])) if err != nil { return reconcile.Result{}, err } diff --git a/pkg/export/disk/disk.go b/pkg/export/disk/disk.go new file mode 100644 index 00000000000..ce6121455f5 --- /dev/null +++ b/pkg/export/disk/disk.go @@ -0,0 +1,311 @@ +package disk + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "os" + "path" + "path/filepath" + "sort" + "strings" + "syscall" + "time" + + "github.com/open-policy-agent/gatekeeper/v3/pkg/export/util" + "github.com/open-policy-agent/gatekeeper/v3/pkg/logging" + "k8s.io/client-go/util/retry" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +type Connection struct { + // path to store audit logs + Path string `json:"path,omitempty"` + // max number of audit results to store + MaxAuditResults int `json:"maxAuditResults,omitempty"` + // File to write audit logs + File *os.File + + // current audit run file name + currentAuditRun string +} + +type Writer struct { + openConnections map[string]Connection +} + +const ( + Name = "disk" + 
maxAllowedAuditRuns = 5 + maxAuditResults = "maxAuditResults" + violationPath = "path" +) + +var Connections = &Writer{ + openConnections: make(map[string]Connection), +} + +var log = logf.Log.WithName("disk-driver").WithValues(logging.Process, "export") + +func (r *Writer) CreateConnection(_ context.Context, connectionName string, config interface{}) error { + path, maxResults, err := unmarshalConfig(config) + if err != nil { + return fmt.Errorf("error creating connection %s: %w", connectionName, err) + } + + r.openConnections[connectionName] = Connection{ + Path: path, + MaxAuditResults: int(maxResults), + } + return nil +} + +func (r *Writer) UpdateConnection(_ context.Context, connectionName string, config interface{}) error { + conn, exists := r.openConnections[connectionName] + if !exists { + return fmt.Errorf("connection %s for disk driver not found", connectionName) + } + + path, maxResults, err := unmarshalConfig(config) + if err != nil { + return fmt.Errorf("error updating connection %s: %w", connectionName, err) + } + + if conn.Path != path { + if conn.File != nil { + if err := conn.unlockAndCloseFile(); err != nil { + return fmt.Errorf("error updating connection %s, error closing file: %w", connectionName, err) + } + } + if err := os.RemoveAll(conn.Path); err != nil { + return fmt.Errorf("error updating connection %s, error deleting violations stored at old path: %w", connectionName, err) + } + conn.Path = path + conn.File = nil + } + + conn.MaxAuditResults = int(maxResults) + + r.openConnections[connectionName] = conn + return nil +} + +func (r *Writer) CloseConnection(connectionName string) error { + conn, ok := r.openConnections[connectionName] + if !ok { + return fmt.Errorf("connection %s not found for disk driver", connectionName) + } + delete(r.openConnections, connectionName) + if conn.File != nil { + if err := conn.unlockAndCloseFile(); err != nil { + return fmt.Errorf("connection is closed without removing respective violations. 
error closing file: %w", err) + } + } + err := os.RemoveAll(conn.Path) + return err +} + +func (r *Writer) Publish(_ context.Context, connectionName string, data interface{}, topic string) error { + conn, ok := r.openConnections[connectionName] + if !ok { + return fmt.Errorf("invalid connection: %s not found for disk driver", connectionName) + } + + var violation util.ExportMsg + if violation, ok = data.(util.ExportMsg); !ok { + return fmt.Errorf("invalid data type: cannot convert data to exportMsg") + } + + if violation.Message == util.AuditStartedMsg { + err := conn.handleAuditStart(violation.ID, topic) + if err != nil { + return fmt.Errorf("error handling audit start: %w", err) + } + r.openConnections[connectionName] = conn + } + + jsonData, err := json.Marshal(data) + if err != nil { + return fmt.Errorf("error marshaling data: %w", err) + } + + if conn.File == nil { + return fmt.Errorf("failed to write violation: no file provided") + } + + _, err = conn.File.WriteString(string(jsonData) + "\n") + if err != nil { + return fmt.Errorf("error writing message to disk: %w", err) + } + + if violation.Message == util.AuditCompletedMsg { + err := conn.handleAuditEnd(topic) + if err != nil { + return fmt.Errorf("error handling audit end: %w", err) + } + conn.File = nil + conn.currentAuditRun = "" + r.openConnections[connectionName] = conn + } + return nil +} + +func (conn *Connection) handleAuditStart(auditID string, topic string) error { + // Replace ':' with '_' to avoid issues with file names in windows + conn.currentAuditRun = strings.ReplaceAll(auditID, ":", "_") + + dir := path.Join(conn.Path, topic) + if err := os.MkdirAll(dir, 0o777); err != nil { + return fmt.Errorf("failed to create directories: %w", err) + } + + // Set the dir permissions to make sure reader can modify files if need be after the lock is released. 
+ if err := os.Chmod(dir, 0o777); err != nil { + return fmt.Errorf("failed to set directory permissions: %w", err) + } + + file, err := os.OpenFile(path.Join(dir, appendExtension(conn.currentAuditRun, "txt")), os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o666) + if err != nil { + return fmt.Errorf("failed to open file: %w", err) + } + conn.File = file + err = retry.OnError(retry.DefaultBackoff, func(_ error) bool { + return true + }, func() error { + return syscall.Flock(int(conn.File.Fd()), syscall.LOCK_EX) + }) + if err != nil { + return fmt.Errorf("failed to acquire lock: %w", err) + } + log.Info("Writing latest violations in", "filename", conn.File.Name()) + return nil +} + +func (conn *Connection) handleAuditEnd(topic string) error { + if err := retry.OnError(retry.DefaultBackoff, func(_ error) bool { + return true + }, conn.unlockAndCloseFile); err != nil { + return fmt.Errorf("error closing file: %w, %s", err, conn.currentAuditRun) + } + conn.File = nil + + readyFilePath := path.Join(conn.Path, topic, appendExtension(conn.currentAuditRun, "log")) + if err := os.Rename(path.Join(conn.Path, topic, appendExtension(conn.currentAuditRun, "txt")), readyFilePath); err != nil { + return fmt.Errorf("failed to rename file: %w, %s", err, conn.currentAuditRun) + } + // Set the file permissions to make sure reader can modify files if need be after the lock is released. 
+ if err := os.Chmod(readyFilePath, 0o777); err != nil { + return fmt.Errorf("failed to set file permissions: %w", err) + } + log.Info("File renamed", "filename", readyFilePath) + + return conn.cleanupOldAuditFiles(topic) +} + +func (conn *Connection) unlockAndCloseFile() error { + if conn.File == nil { + return fmt.Errorf("no file to close") + } + fd := int(conn.File.Fd()) + if fd < 0 { + return fmt.Errorf("invalid file descriptor") + } + if err := syscall.Flock(fd, syscall.LOCK_UN); err != nil { + return fmt.Errorf("failed to release lock: %w", err) + } + if err := conn.File.Close(); err != nil { + return fmt.Errorf("failed to close file: %w", err) + } + return nil +} + +func (conn *Connection) cleanupOldAuditFiles(topic string) error { + dirPath := path.Join(conn.Path, topic) + files, err := getFilesSortedByModTimeAsc(dirPath) + if err != nil { + return fmt.Errorf("failed removing older audit files, error getting files sorted by mod time: %w", err) + } + var errs []error + for i := 0; i < len(files)-conn.MaxAuditResults; i++ { + if e := os.Remove(files[i]); e != nil { + errs = append(errs, fmt.Errorf("error removing file: %w", e)) + } + } + + return errors.Join(errs...) +} + +func getFilesSortedByModTimeAsc(dirPath string) ([]string, error) { + type fileInfo struct { + path string + modTime time.Time + } + var filesInfo []fileInfo + + err := filepath.Walk(dirPath, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.IsDir() { + filesInfo = append(filesInfo, fileInfo{path: path, modTime: info.ModTime()}) + } + return nil + }) + if err != nil { + return nil, err + } + + sort.Slice(filesInfo, func(i, j int) bool { + return filesInfo[i].modTime.Before(filesInfo[j].modTime) + }) + + var sortedFiles []string + for _, fi := range filesInfo { + sortedFiles = append(sortedFiles, fi.path) + } + + return sortedFiles, nil +} + +func appendExtension(name string, ext string) string { + return name + "." 
+ ext +} + +// validatePath checks if the provided path is valid and writable. +func validatePath(path string) error { + if path == "" { + return fmt.Errorf("path cannot be empty") + } + if strings.Contains(path, "..") { + return fmt.Errorf("path must not contain '..', dir traversal is not allowed") + } + // validate if the path is writable + if err := os.MkdirAll(path, 0o777); err != nil { + return fmt.Errorf("failed to create directory: %w", err) + } + return nil +} + +func unmarshalConfig(config interface{}) (string, float64, error) { + cfg, ok := config.(map[string]interface{}) + if !ok { + return "", 0.0, fmt.Errorf("invalid config format") + } + + path, pathOk := cfg[violationPath].(string) + if !pathOk { + return "", 0.0, fmt.Errorf("missing or invalid 'path'") + } + if err := validatePath(path); err != nil { + return "", 0.0, fmt.Errorf("invalid path: %w", err) + } + maxResults, maxResultsOk := cfg[maxAuditResults].(float64) + if !maxResultsOk { + return "", 0.0, fmt.Errorf("missing or invalid 'maxAuditResults'") + } + if maxResults > maxAllowedAuditRuns { + return "", 0.0, fmt.Errorf("maxAuditResults cannot be greater than the maximum allowed audit runs: %d", maxAllowedAuditRuns) + } + return path, maxResults, nil +} \ No newline at end of file diff --git a/pkg/export/disk/disk_test.go b/pkg/export/disk/disk_test.go new file mode 100644 index 00000000000..05c33dd73d8 --- /dev/null +++ b/pkg/export/disk/disk_test.go @@ -0,0 +1,1006 @@ +package disk + +import ( + "context" + "fmt" + "os" + "path" + "path/filepath" + "slices" + "strings" + "syscall" + "testing" + + "github.com/open-policy-agent/gatekeeper/v3/pkg/export/util" +) + +func TestCreateConnection(t *testing.T) { + writer := &Writer{ + openConnections: make(map[string]Connection), + } + tmpPath := t.TempDir() + tests := []struct { + name string + connectionName string + config interface{} + err error + expectError bool + }{ + { + name: "Valid config", + connectionName: "conn1", + config: 
map[string]interface{}{ + "path": tmpPath, + "maxAuditResults": 3.0, + }, + expectError: false, + }, + { + name: "Invalid config format", + connectionName: "conn2", + config: map[int]interface{}{ + 1: "test", + }, + err: fmt.Errorf("error creating connection conn2: invalid config format"), + expectError: true, + }, + { + name: "Missing path", + connectionName: "conn3", + config: map[string]interface{}{ + "maxAuditResults": 10.0, + }, + err: fmt.Errorf("error creating connection conn3: missing or invalid 'path'"), + expectError: true, + }, + { + name: "Missing maxAuditResults", + connectionName: "conn4", + config: map[string]interface{}{ + "path": tmpPath, + }, + err: fmt.Errorf("error creating connection conn4: missing or invalid 'maxAuditResults'"), + expectError: true, + }, + { + name: "Exceeding maxAuditResults", + connectionName: "conn4", + config: map[string]interface{}{ + "path": tmpPath, + "maxAuditResults": 10.0, + }, + err: fmt.Errorf("error creating connection conn4: maxAuditResults cannot be greater than the maximum allowed audit runs: 5"), + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := writer.CreateConnection(context.Background(), tt.connectionName, tt.config) + if tt.expectError && tt.err.Error() != err.Error() { + t.Errorf("CreateConnection() error = %v, expectError %v", err, tt.expectError) + } + if !tt.expectError { + conn, exists := writer.openConnections[tt.connectionName] + if !exists { + t.Errorf("Connection %s was not created", tt.connectionName) + } + path, pathOk := tt.config.(map[string]interface{})["path"].(string) + if !pathOk { + t.Errorf("Failed to get path from config") + } + if conn.Path != path { + t.Errorf("Expected path %s, got %s", path, conn.Path) + } + info, err := os.Stat(path) + if err != nil { + t.Errorf("failed to stat path: %s", err.Error()) + } + if !info.IsDir() { + t.Errorf("path is not a directory") + } + maxAuditResults, maxResultsOk := 
tt.config.(map[string]interface{})["maxAuditResults"].(float64) + if !maxResultsOk { + t.Errorf("Failed to get maxAuditResults from config") + } + if conn.MaxAuditResults != int(maxAuditResults) { + t.Errorf("Expected maxAuditResults %d, got %d", int(maxAuditResults), conn.MaxAuditResults) + } + } + }) + } +} + +func TestUpdateConnection(t *testing.T) { + writer := &Writer{ + openConnections: make(map[string]Connection), + } + tmpPath := t.TempDir() + file, err := os.CreateTemp(tmpPath, "testfile") + if err != nil { + t.Errorf("Failed to create temp file: %v", err) + } + + err = syscall.Flock(int(file.Fd()), syscall.LOCK_EX) + if err != nil { + t.Errorf("Failed to lock file: %v", err) + } + + writer.openConnections["conn1"] = Connection{ + Path: tmpPath, + MaxAuditResults: 3, + File: file, + } + + tests := []struct { + name string + connectionName string + config interface{} + expectError bool + err error + }{ + { + name: "Valid update", + connectionName: "conn1", + config: map[string]interface{}{ + "path": t.TempDir(), + "maxAuditResults": 4.0, + }, + expectError: false, + err: nil, + }, + { + name: "Invalid config format", + connectionName: "conn1", + config: map[int]interface{}{ + 1: "test", + }, + expectError: true, + err: fmt.Errorf("error updating connection conn1: invalid config format"), + }, + { + name: "Connection not found", + connectionName: "conn2", + config: map[string]interface{}{ + "path": t.TempDir(), + "maxAuditResults": 2.0, + }, + expectError: true, + err: fmt.Errorf("connection conn2 for disk driver not found"), + }, + { + name: "Missing path", + connectionName: "conn1", + config: map[string]interface{}{ + "maxAuditResults": 2.0, + }, + expectError: true, + err: fmt.Errorf("error updating connection conn1: missing or invalid 'path'"), + }, + { + name: "Missing maxAuditResults", + connectionName: "conn1", + config: map[string]interface{}{ + "path": t.TempDir(), + }, + expectError: true, + err: fmt.Errorf("error updating connection conn1: missing 
or invalid 'maxAuditResults'"), + }, + { + name: "Exceeding maxAuditResults", + connectionName: "conn1", + config: map[string]interface{}{ + "path": t.TempDir(), + "maxAuditResults": 10.0, + }, + expectError: true, + err: fmt.Errorf("error updating connection conn1: maxAuditResults cannot be greater than the maximum allowed audit runs: 5"), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := writer.UpdateConnection(context.Background(), tt.connectionName, tt.config) + if tt.expectError && tt.err.Error() != err.Error() { + t.Errorf("UpdateConnection() error = %v, expectError %v", err, tt.expectError) + } + if !tt.expectError { + conn, exists := writer.openConnections[tt.connectionName] + if !exists { + t.Errorf("Connection %s was not found", tt.connectionName) + } + path, pathOk := tt.config.(map[string]interface{})["path"].(string) + if !pathOk { + t.Errorf("Failed to get path from config") + } + if conn.Path != path { + t.Errorf("Expected path %s, got %s", path, conn.Path) + } + info, err := os.Stat(path) + if err != nil { + t.Errorf("failed to stat path: %s", err.Error()) + } + if !info.IsDir() { + t.Errorf("path is not a directory") + } + maxAuditResults, maxResultsOk := tt.config.(map[string]interface{})["maxAuditResults"].(float64) + if !maxResultsOk { + t.Errorf("Failed to get maxAuditResults from config") + } + if conn.MaxAuditResults != int(maxAuditResults) { + t.Errorf("Expected maxAuditResults %d, got %d", int(maxAuditResults), conn.MaxAuditResults) + } + } + }) + } +} + +func TestCloseConnection(t *testing.T) { + // Add to check clean up + writer := &Writer{ + openConnections: make(map[string]Connection), + } + + tests := []struct { + name string + connectionName string + setup func() error + expectError bool + }{ + { + name: "Valid close", + connectionName: "conn1", + setup: func() error { + // Pre-create a connection to close + writer.openConnections["conn1"] = Connection{ + Path: t.TempDir(), + MaxAuditResults: 10, + 
} + return nil + }, + expectError: false, + }, + { + name: "Connection not found", + connectionName: "conn2", + setup: nil, + expectError: true, + }, + { + name: "Valid close with open and locked file", + connectionName: "conn3", + setup: func() error { + // Pre-create a connection to close + d := t.TempDir() + if err := os.MkdirAll(d, 0o755); err != nil { + return err + } + file, err := os.CreateTemp(d, "testfile") + if err != nil { + return err + } + writer.openConnections["conn3"] = Connection{ + Path: d, + MaxAuditResults: 10, + File: file, + } + return syscall.Flock(int(file.Fd()), syscall.LOCK_EX) + }, + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.setup != nil { + if err := tt.setup(); err != nil { + t.Errorf("Setup failed: %v", err) + } + } + err := writer.CloseConnection(tt.connectionName) + if (err != nil) != tt.expectError { + t.Errorf("CloseConnection() error = %v, expectError %v", err, tt.expectError) + } + if !tt.expectError { + _, exists := writer.openConnections[tt.connectionName] + if exists { + t.Errorf("Connection %s was not closed", tt.connectionName) + } + } + }) + } +} + +func TestPublish(t *testing.T) { + writer := &Writer{ + openConnections: make(map[string]Connection), + } + + // Pre-create a connection to publish to + writer.openConnections["conn1"] = Connection{ + Path: t.TempDir(), + MaxAuditResults: 1, + } + + tests := []struct { + name string + connectionName string + data interface{} + topic string + expectError bool + }{ + { + name: "Valid publish - audit started", + connectionName: "conn1", + data: util.ExportMsg{ + ID: "audit1", + Message: "audit is started", + }, + topic: "topic1", + expectError: false, + }, + { + name: "Valid publish - audit in progress", + connectionName: "conn1", + data: util.ExportMsg{ + ID: "audit1", + Message: "audit is in progress", + }, + topic: "topic1", + expectError: false, + }, + { + name: "Valid publish - audit completed", + connectionName: 
"conn1", + data: util.ExportMsg{ + ID: "audit1", + Message: "audit is completed", + }, + topic: "topic1", + expectError: false, + }, + { + name: "Invalid data type", + connectionName: "conn1", + data: "invalid data", + topic: "topic1", + expectError: true, + }, + { + name: "Connection not found", + connectionName: "conn2", + data: util.ExportMsg{ + ID: "audit1", + Message: "audit is started", + }, + topic: "topic1", + expectError: true, + }, + { + name: "Valid publish - 2nd audit started", + connectionName: "conn1", + data: util.ExportMsg{ + ID: "audit2", + Message: "audit is started", + }, + topic: "topic1", + expectError: false, + }, + { + name: "Valid publish - 2nd audit in progress", + connectionName: "conn1", + data: util.ExportMsg{ + ID: "audit2", + Message: "audit is in progress", + }, + topic: "topic1", + expectError: false, + }, + { + name: "Valid publish - 2nd audit completed", + connectionName: "conn1", + data: util.ExportMsg{ + ID: "audit2", + Message: "audit is completed", + }, + topic: "topic1", + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := writer.Publish(context.Background(), tt.connectionName, tt.data, tt.topic) + if (err != nil) != tt.expectError { + t.Errorf("Publish() error = %v, expectError %v", err, tt.expectError) + } + if !tt.expectError { + files, err := listFiles(path.Join(writer.openConnections[tt.connectionName].Path, tt.topic)) + if err != nil { + t.Errorf("Failed to list files: %v", err) + } + msg, ok := tt.data.(util.ExportMsg) + if !ok { + t.Errorf("Failed to convert data to ExportMsg") + } + if msg.Message == "audit is started" { + if len(files) > 2 { + t.Errorf("Expected <= 2 file, got %d, %v", len(files), files) + } + if slices.Contains(files, writer.openConnections[tt.connectionName].currentAuditRun+".txt") { + t.Errorf("Expected file %s to exist, but it does not", writer.openConnections[tt.connectionName].currentAuditRun+".txt") + } + } + if msg.Message == "audit is 
completed" { + if len(files) != 1 { + t.Errorf("Expected 1 file, got %d, %v", len(files), files) + } + if slices.Contains(files, msg.ID+".log") { + t.Errorf("Expected file %s to exist, but it does not, files: %v", msg.ID+".log", files) + } + content, err := os.ReadFile(files[0]) + if err != nil { + t.Errorf("Failed to read file: %v", err) + } + for _, msg := range []string{"audit is started", "audit is in progress", "audit is completed"} { + if !strings.Contains(string(content), msg) { + t.Errorf("Expected message %q in file %s, but it was not found", msg, files[0]) + } + } + } + } + }) + } +} + +func TestHandleAuditStart(t *testing.T) { + tests := []struct { + name string + connection Connection + auditID string + topic string + expectError bool + }{ + { + name: "Valid audit start", + connection: Connection{ + Path: t.TempDir(), + }, + auditID: "audit1", + topic: "topic1", + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.connection.handleAuditStart(tt.auditID, tt.topic) + if (err != nil) != tt.expectError { + t.Errorf("handleAuditStart() error = %v, expectError %v", err, tt.expectError) + } + if !tt.expectError { + expectedFileName := path.Join(tt.connection.Path, tt.topic, tt.auditID+".txt") + if tt.connection.currentAuditRun != tt.auditID { + t.Errorf("Expected currentAuditRun %s, got %s", tt.auditID, tt.connection.currentAuditRun) + } + if tt.connection.File == nil { + t.Errorf("Expected file to be opened, but it is nil") + } else { + if tt.connection.File.Name() != expectedFileName { + t.Errorf("Expected file name %s, got %s", expectedFileName, tt.connection.File.Name()) + } + tt.connection.File.Close() + } + } + }) + } +} + +func TestHandleAuditEnd(t *testing.T) { + tests := []struct { + name string + connection Connection + topic string + setup func(conn *Connection) error + expectError bool + expectedFile string + }{ + { + name: "Valid audit end", + connection: Connection{ + Path: t.TempDir(), + 
currentAuditRun: "audit1", + }, + topic: "topic1", + setup: func(conn *Connection) error { + dir := path.Join(conn.Path, "topic1") + if err := os.MkdirAll(dir, 0o755); err != nil { + return err + } + file, err := os.Create(path.Join(dir, conn.currentAuditRun+".txt")) + if err != nil { + return err + } + conn.File = file + return nil + }, + expectError: false, + }, + { + name: "Cleanup old audit files error", + connection: Connection{ + Path: t.TempDir(), + currentAuditRun: "audit1", + MaxAuditResults: 1, + }, + topic: "topic1", + setup: func(conn *Connection) error { + // Create an extra file to trigger cleanup + dir := path.Join(conn.Path, "topic1") + if err := os.MkdirAll(dir, 0o755); err != nil { + return err + } + if _, err := os.Create(path.Join(dir, "extra_audit.log")); err != nil { + return err + } + file, err := os.Create(path.Join(dir, conn.currentAuditRun+".txt")) + if err != nil { + return err + } + conn.File = file + return nil + }, + expectError: false, + expectedFile: "audit1.log", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.setup != nil { + if err := tt.setup(&tt.connection); err != nil { + t.Errorf("Setup failed: %v", err) + } + } + err := tt.connection.handleAuditEnd(tt.topic) + if (err != nil) != tt.expectError { + t.Errorf("handleAuditEnd() error = %v, expectError %v", err, tt.expectError) + } + + if !tt.expectError { + files, err := listFiles(path.Join(tt.connection.Path, tt.topic)) + if err != nil { + t.Errorf("Failed to list files: %v", err) + } + if slices.Contains(files, tt.expectedFile) { + t.Errorf("Expected file %s to exist, but it does not. 
Files: %v", tt.expectedFile, files) + } + } + }) + } +} + +func listFiles(dir string) ([]string, error) { + var files []string + + err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.IsDir() { + files = append(files, path) + } + return nil + }) + + return files, err +} + +func TestUnlockAndCloseFile(t *testing.T) { + tests := []struct { + name string + connection Connection + setup func(conn *Connection) error + expectError bool + }{ + { + name: "Valid unlock and close", + connection: Connection{ + Path: t.TempDir(), + }, + setup: func(conn *Connection) error { + if err := os.MkdirAll(conn.Path, 0o755); err != nil { + return err + } + file, err := os.CreateTemp(conn.Path, "testfile") + if err != nil { + return err + } + conn.File = file + return syscall.Flock(int(file.Fd()), syscall.LOCK_EX) + }, + expectError: false, + }, + { + name: "No file to close", + connection: Connection{ + Path: t.TempDir(), + }, + setup: nil, + expectError: true, + }, + { + name: "Invalid file descriptor", + connection: Connection{ + Path: t.TempDir(), + }, + setup: func(conn *Connection) error { + file, err := os.CreateTemp(conn.Path, "testfile") + if err != nil { + return err + } + conn.File = file + file.Close() // Close the file to make the descriptor invalid + return nil + }, + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.setup != nil { + if err := tt.setup(&tt.connection); err != nil { + t.Errorf("Setup failed: %v", err) + } + } + err := tt.connection.unlockAndCloseFile() + if (err != nil) != tt.expectError { + t.Errorf("unlockAndCloseFile() error = %v, expectError %v", err, tt.expectError) + } + }) + } +} + +func TestCleanupOldAuditFiles(t *testing.T) { + tests := []struct { + name string + connection Connection + topic string + setup func(conn *Connection) error + expectError bool + expectedFiles int + }{ + { + name: "No files to clean up", + 
connection: Connection{ + Path: t.TempDir(), + MaxAuditResults: 5, + }, + topic: "topic1", + setup: func(conn *Connection) error { + return os.MkdirAll(path.Join(conn.Path, "topic1"), 0o755) + }, + expectError: false, + expectedFiles: 0, + }, + { + name: "Files within limit", + connection: Connection{ + Path: t.TempDir(), + MaxAuditResults: 5, + }, + topic: "topic1", + setup: func(conn *Connection) error { + dir := path.Join(conn.Path, "topic1") + if err := os.MkdirAll(dir, 0o755); err != nil { + return err + } + for i := 0; i < 3; i++ { + if _, err := os.Create(path.Join(dir, fmt.Sprintf("audit%d.txt", i))); err != nil { + return err + } + } + return nil + }, + expectError: false, + expectedFiles: 3, + }, + { + name: "Files exceeding limit", + connection: Connection{ + Path: t.TempDir(), + MaxAuditResults: 2, + }, + topic: "topic1", + setup: func(conn *Connection) error { + dir := path.Join(conn.Path, "topic1") + if err := os.MkdirAll(dir, 0o755); err != nil { + return err + } + for i := 0; i < 4; i++ { + if _, err := os.Create(path.Join(dir, fmt.Sprintf("audit%d.txt", i))); err != nil { + return err + } + } + return nil + }, + expectError: false, + expectedFiles: 2, + }, + { + name: "Error getting earliest file", + connection: Connection{ + Path: t.TempDir(), + MaxAuditResults: 2, + }, + topic: "topic1", + setup: nil, + expectError: true, + expectedFiles: 0, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.setup != nil { + if err := tt.setup(&tt.connection); err != nil { + t.Errorf("Setup failed: %v", err) + } + } + err := tt.connection.cleanupOldAuditFiles(tt.topic) + if (err != nil) != tt.expectError { + t.Errorf("cleanupOldAuditFiles() error = %v, expectError %v", err, tt.expectError) + } + if !tt.expectError { + dir := path.Join(tt.connection.Path, tt.topic) + files, err := os.ReadDir(dir) + if err != nil { + t.Errorf("Failed to read directory: %v", err) + } + if len(files) != tt.expectedFiles { + t.Errorf("Expected %d 
files, got %d", tt.expectedFiles, len(files)) + } + } + }) + } +} + +func TestGetFilesSortedByModTimeAsc(t *testing.T) { + tests := []struct { + name string + setup func(dir string) error + expectedFile string + expectedFiles int + expectError bool + }{ + { + name: "No files in directory", + setup: func(_ string) error { + return nil + }, + expectedFile: "", + expectedFiles: 0, + expectError: false, + }, + { + name: "Single file in directory", + setup: func(dir string) error { + _, err := os.Create(path.Join(dir, "file1.txt")) + return err + }, + expectedFile: "file1.txt", + expectedFiles: 1, + expectError: false, + }, + { + name: "Multiple files in directory", + setup: func(dir string) error { + for i := 1; i <= 3; i++ { + if _, err := os.Create(path.Join(dir, fmt.Sprintf("file%d.txt", i))); err != nil { + return err + } + } + return nil + }, + expectedFile: "file1.txt", + expectedFiles: 3, + expectError: false, + }, + { + name: "Nested directories", + setup: func(dir string) error { + subDir := path.Join(dir, "subdir") + if err := os.Mkdir(subDir, 0o755); err != nil { + return err + } + if _, err := os.Create(path.Join(subDir, "file1.txt")); err != nil { + return err + } + return nil + }, + expectedFile: "subdir/file1.txt", + expectedFiles: 1, + expectError: false, + }, + { + name: "Error walking directory", + setup: func(dir string) error { + return os.Chmod(dir, 0o000) + }, + expectedFile: "", + expectedFiles: 0, + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + dir := t.TempDir() + if tt.setup != nil { + if err := tt.setup(dir); err != nil { + t.Errorf("Setup failed: %v", err) + } + } + files, err := getFilesSortedByModTimeAsc(dir) + if (err != nil) != tt.expectError { + t.Errorf("getEarliestFile() error = %v, expectError %v", err, tt.expectError) + } + if !tt.expectError { + if len(files) != tt.expectedFiles { + t.Errorf("Expected %d files, got %d", tt.expectedFiles, len(files)) + } + if tt.expectedFile != "" 
&& !strings.HasSuffix(files[0], tt.expectedFile) { + t.Errorf("Expected earliest file %s, got %s", tt.expectedFile, files[0]) + } + } + }) + } +} + +func TestValidatePath(t *testing.T) { + tests := []struct { + name string + path string + setup func(path string) error + expectError bool + expectedErr string + }{ + { + name: "Valid path", + path: t.TempDir(), + setup: nil, + expectError: false, + }, + { + name: "Empty path", + path: "", + setup: nil, + expectError: true, + expectedErr: "path cannot be empty", + }, + { + name: "Path with '..'", + path: "../invalid/path", + setup: nil, + expectError: true, + expectedErr: "path must not contain '..', dir traversal is not allowed", + }, + { + name: "Path is a file", + path: func() string { + file, err := os.CreateTemp("", "testfile") + if err != nil { + t.Fatalf("Failed to create temp file: %v", err) + } + return file.Name() + }(), + setup: nil, + expectError: true, + expectedErr: "failed to create directory", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.setup != nil { + if err := tt.setup(tt.path); err != nil { + t.Fatalf("Setup failed: %v", err) + } + } + err := validatePath(tt.path) + if (err != nil) != tt.expectError { + t.Errorf("validatePath() error = %v, expectError %v", err, tt.expectError) + } + if tt.expectError && err != nil && !strings.Contains(err.Error(), tt.expectedErr) { + t.Errorf("Expected error to contain %q, got %q", tt.expectedErr, err.Error()) + } + }) + } +} + +func TestUnmarshalConfig(t *testing.T) { + tmpPath := t.TempDir() + + tests := []struct { + name string + config interface{} + expectedPath string + expectedMax float64 + expectError bool + expectedErr string + }{ + { + name: "Valid config", + config: map[string]interface{}{ + "path": tmpPath, + "maxAuditResults": 3.0, + }, + expectedPath: tmpPath, + expectedMax: 3.0, + expectError: false, + }, + { + name: "Invalid config format", + config: map[int]interface{}{1: "test"}, + expectError: true, + 
expectedErr: "invalid config format", + }, + { + name: "Missing path", + config: map[string]interface{}{ + "maxAuditResults": 3.0, + }, + expectError: true, + expectedErr: "missing or invalid 'path'", + }, + { + name: "Invalid path", + config: map[string]interface{}{ + "path": "../invalid/path", + "maxAuditResults": 3.0, + }, + expectError: true, + expectedErr: "invalid path", + }, + { + name: "Missing maxAuditResults", + config: map[string]interface{}{ + "path": tmpPath, + }, + expectError: true, + expectedErr: "missing or invalid 'maxAuditResults'", + }, + { + name: "Exceeding maxAuditResults", + config: map[string]interface{}{ + "path": tmpPath, + "maxAuditResults": 10.0, + }, + expectError: true, + expectedErr: "maxAuditResults cannot be greater than the maximum allowed audit runs", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + path, maxResults, err := unmarshalConfig(tt.config) + if (err != nil) != tt.expectError { + t.Errorf("unmarshalConfig() error = %v, expectError %v", err, tt.expectError) + } + if tt.expectError && err != nil && !strings.Contains(err.Error(), tt.expectedErr) { + t.Errorf("Expected error to contain %q, got %q", tt.expectedErr, err.Error()) + } + if !tt.expectError { + if path != tt.expectedPath { + t.Errorf("Expected path %q, got %q", tt.expectedPath, path) + } + if maxResults != tt.expectedMax { + t.Errorf("Expected maxAuditResults %f, got %f", tt.expectedMax, maxResults) + } + } + }) + } +} + + diff --git a/pkg/export/system.go b/pkg/export/system.go index d9863c35a65..74130e19961 100644 --- a/pkg/export/system.go +++ b/pkg/export/system.go @@ -6,11 +6,13 @@ import ( "sync" "github.com/open-policy-agent/gatekeeper/v3/pkg/export/dapr" + "github.com/open-policy-agent/gatekeeper/v3/pkg/export/disk" "github.com/open-policy-agent/gatekeeper/v3/pkg/export/driver" ) var SupportedDrivers = map[string]driver.Driver{ dapr.Name: dapr.Connections, + disk.Name: disk.Connections, } type System struct { diff --git 
a/pkg/export/util/util.go b/pkg/export/util/util.go new file mode 100644 index 00000000000..c78488e5e68 --- /dev/null +++ b/pkg/export/util/util.go @@ -0,0 +1,49 @@ +package util + +// ExportMsg represents export message for each violation. +type ExportMsg struct { + ID string `json:"id,omitempty"` + Details interface{} `json:"details,omitempty"` + EventType string `json:"eventType,omitempty"` + Group string `json:"group,omitempty"` + Version string `json:"version,omitempty"` + Kind string `json:"kind,omitempty"` + Name string `json:"name,omitempty"` + Namespace string `json:"namespace,omitempty"` + Message string `json:"message,omitempty"` + EnforcementAction string `json:"enforcementAction,omitempty"` + EnforcementActions []string `json:"enforcementActions,omitempty"` + ConstraintAnnotations map[string]string `json:"constraintAnnotations,omitempty"` + ResourceGroup string `json:"resourceGroup,omitempty"` + ResourceAPIVersion string `json:"resourceAPIVersion,omitempty"` + ResourceKind string `json:"resourceKind,omitempty"` + ResourceNamespace string `json:"resourceNamespace,omitempty"` + ResourceName string `json:"resourceName,omitempty"` + ResourceLabels map[string]string `json:"resourceLabels,omitempty"` +} + +type ExportErr struct { + Code ExportError `json:"code"` + Message string `json:"message"` +} + +func (e ExportErr) Error() string { + return e.Message +} + +type ExportError string + +const ( + ErrConnectionNotFound ExportError = "connection_not_found" + ErrInvalidDataType ExportError = "invalid_data_type" + ErrCreatingFile ExportError = "error_creating_file" + ErrFileDoesNotExist ExportError = "file_does_not_exist" + ErrMarshalingData ExportError = "error_marshaling_data" + ErrWritingMessage ExportError = "error_writing_message" + ErrCleaningUpAudit ExportError = "error_cleaning_up_audit" +) + +const ( + AuditStartedMsg = "audit is started" + AuditCompletedMsg = "audit is completed" +) diff --git a/test/bats/helpers.bash b/test/bats/helpers.bash index 
e2acb9e1205..b28ee6546fa 100644 --- a/test/bats/helpers.bash +++ b/test/bats/helpers.bash @@ -140,8 +140,17 @@ mutator_enforced() { } total_violations() { + local backend="$1" ct_total_violations="$(kubectl get k8srequiredlabels pod-must-have-test -n gatekeeper-system -ojson | jq '.status.totalViolations')" audit_id="$(kubectl get k8srequiredlabels pod-must-have-test -n gatekeeper-system -ojson | jq '.status.auditTimestamp')" - violations="$(kubectl logs -n fake-subscriber -l app=sub -c go-sub --tail=-1 | grep $audit_id | grep violation_audited | wc -l)" + violations="" + if [[ "${backend}" == "dapr" ]]; then + violations="$(kubectl logs -n fake-subscriber -l app=sub -c go-sub --tail=-1 | grep $audit_id | grep violation_audited | wc -l)" + elif [[ "${backend}" == "disk" ]]; then + violations="$(kubectl logs -n gatekeeper-system -l gatekeeper.sh/operation=audit -c reader --tail=-1 | grep $audit_id | grep violation_audited | wc -l)" + else + echo "Unknown backend: ${backend}" + return 1 + fi [[ "${ct_total_violations}" -eq "${violations}" ]] } diff --git a/test/bats/test.bats b/test/bats/test.bats index 63e8ca268f0..3724833c98e 100644 --- a/test/bats/test.bats +++ b/test/bats/test.bats @@ -16,6 +16,7 @@ teardown_file() { kubectl label ns ${GATEKEEPER_NAMESPACE} admission.gatekeeper.sh/ignore=no-self-managing --overwrite || true kubectl delete ns \ gatekeeper-test-playground \ + gatekeeper-test-playground-scoped \ gatekeeper-excluded-namespace \ gatekeeper-excluded-prefix-match-namespace \ gatekeeper-excluded-suffix-match-namespace || true @@ -673,7 +674,7 @@ __expansion_audit_test() { wait_for_process ${WAIT_TIME} ${SLEEP_TIME} "constraint_enforced k8srequiredlabels pod-must-have-test" - wait_for_process ${WAIT_TIME} ${SLEEP_TIME} "total_violations" + wait_for_process ${WAIT_TIME} ${SLEEP_TIME} "total_violations ${EXPORT_BACKEND}" run kubectl delete -f test/export/k8srequiredlabels_ct.yaml --ignore-not-found run kubectl delete -f test/export/pod_must_have_test.yaml 
--ignore-not-found diff --git a/test/export/fake-reader/Dockerfile b/test/export/fake-reader/Dockerfile new file mode 100644 index 00000000000..b703ecbd7d5 --- /dev/null +++ b/test/export/fake-reader/Dockerfile @@ -0,0 +1,31 @@ +FROM --platform=$BUILDPLATFORM golang:1.24-bookworm@sha256:00eccd446e023d3cd9566c25a6e6a02b90db3e1e0bbe26a48fc29cd96e800901 AS builder + +ARG TARGETPLATFORM +ARG TARGETOS +ARG TARGETARCH +ARG TARGETVARIANT="" +ARG LDFLAGS + +ENV GO111MODULE=on \ + CGO_ENABLED=0 \ + GOOS=${TARGETOS} \ + GOARCH=${TARGETARCH} \ + GOARM=${TARGETVARIANT} + +WORKDIR /go/src/github.com/open-policy-agent/gatekeeper/test/export/fake-reader + +COPY . . + +RUN go mod init && go mod tidy && go mod vendor + +RUN go build -o main + +FROM gcr.io/distroless/static-debian12@sha256:3d0f463de06b7ddff27684ec3bfd0b54a425149d0f8685308b1fdf297b0265e9 + +WORKDIR / + +COPY --from=builder /go/src/github.com/open-policy-agent/gatekeeper/test/export/fake-reader/main . + +USER 65532:65532 + +ENTRYPOINT ["/main"] \ No newline at end of file diff --git a/test/export/fake-reader/export_config.yaml b/test/export/fake-reader/export_config.yaml new file mode 100644 index 00000000000..a56339c805f --- /dev/null +++ b/test/export/fake-reader/export_config.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: audit + namespace: gatekeeper-system +data: + driver: "disk" + config: | + { + "path": "/tmp/violations", + "maxAuditResults": 3 + } diff --git a/test/export/fake-reader/main.go b/test/export/fake-reader/main.go new file mode 100644 index 00000000000..02fcff1cb49 --- /dev/null +++ b/test/export/fake-reader/main.go @@ -0,0 +1,102 @@ +package main + +import ( + "bufio" + "fmt" + "log" + "os" + "path/filepath" + "strings" + "syscall" + "time" +) + +func main() { + dirPath := "/tmp/violations/" + info, err := os.Stat(dirPath) + if err != nil { + log.Fatalf("failed to stat path: %v", err) + } + if !info.IsDir() { + log.Fatalf("path is not a directory") + } + + for { + // Find 
the latest created file in dirPath + latestFile, files, err := getLatestFile(dirPath) + if err != nil { + log.Println("Latest file is not found, retring in 5 seconds", err) + time.Sleep(5 * time.Second) + continue + } + log.Println("available files", files) + log.Println("reading from", latestFile) + file, err := os.OpenFile(latestFile, os.O_RDONLY, 0o644) + if err != nil { + log.Println("Error opening file", err) + time.Sleep(5 * time.Second) + continue + } + + // Acquire an exclusive lock on the file + if err := syscall.Flock(int(file.Fd()), syscall.LOCK_EX); err != nil { + log.Fatalln("Error locking file", err) + } + + // Read the file content + scanner := bufio.NewScanner(file) + var lines []string + for scanner.Scan() { + lines = append(lines, scanner.Text()) + } + + if err := scanner.Err(); err != nil { + log.Fatalln("Error reading file", err) + } + + // Process the read content + for _, line := range lines { + log.Println(line) + } + + // Release the lock + if err := syscall.Flock(int(file.Fd()), syscall.LOCK_UN); err != nil { + log.Fatalln("Error unlocking file", err) + } + + // Close the file + if err := file.Close(); err != nil { + log.Fatalln("Error closing file", err) + } + time.Sleep(90 * time.Second) + } +} + +func getLatestFile(dirPath string) (string, []string, error) { + var latestFile string + var latestModTime time.Time + var files []string + + err := filepath.Walk(dirPath, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.IsDir() && strings.Contains(path, ".log") && (latestFile == "" || info.ModTime().After(latestModTime)) { + latestFile = path + latestModTime = info.ModTime() + } + if !info.IsDir() { + files = append(files, path) + } + return nil + }) + if err != nil { + return "", files, err + } + + if latestFile == "" { + return "", files, fmt.Errorf("no files found in directory: %s", dirPath) + } + + return latestFile, files, nil +} diff --git a/test/export/fake-subscriber/Dockerfile 
b/test/export/fake-subscriber/Dockerfile index d4992b10fef..c09a00ad1df 100644 --- a/test/export/fake-subscriber/Dockerfile +++ b/test/export/fake-subscriber/Dockerfile @@ -1,4 +1,4 @@ -FROM --platform=$BUILDPLATFORM golang:1.24-bookworm@sha256:fa1a01d362a7b9df68b021d59a124d28cae6d99ebd1a876e3557c4dd092f1b1d AS builder +FROM --platform=$BUILDPLATFORM golang:1.24-bookworm@sha256:00eccd446e023d3cd9566c25a6e6a02b90db3e1e0bbe26a48fc29cd96e800901 AS builder ARG TARGETPLATFORM ARG TARGETOS @@ -20,7 +20,7 @@ RUN go mod init && go mod tidy && go mod vendor RUN go build -o main -FROM gcr.io/distroless/static-debian12@sha256:8dd8d3ca2cf283383304fd45a5c9c74d5f2cd9da8d3b077d720e264880077c65 +FROM gcr.io/distroless/static-debian12@sha256:3d0f463de06b7ddff27684ec3bfd0b54a425149d0f8685308b1fdf297b0265e9 WORKDIR / diff --git a/test/export/publish-components.yaml b/test/export/fake-subscriber/manifest/publish-components.yaml similarity index 100% rename from test/export/publish-components.yaml rename to test/export/fake-subscriber/manifest/publish-components.yaml diff --git a/website/docs/export.md b/website/docs/export.md index bf5f434c7c5..f4b93106aae 100644 --- a/website/docs/export.md +++ b/website/docs/export.md @@ -41,11 +41,13 @@ data: - `config` field is a json object that configures how the connection is made. E.g. which queue messages should be sent to. #### Available drivers -Dapr: https://dapr.io/ + +- Dapr: Export violations using pubsub model provided with [Dapr](https://dapr.io/) +- Disk: Export violations to file system. ### Quick start with exporting violations using Dapr and Redis -#### Prerequisites +#### Prerequisites for Dapr 1. Install Dapr @@ -130,10 +132,10 @@ Dapr: https://dapr.io/ ``` :::important - Please make sure `fake-subscriber` image is built and available in your cluster. Dockerfile to build image for `fake-subscriber` is under [gatekeeper/test/fake-subscriber](https://github.com/open-policy-agent/gatekeeper/tree/master/test/export/fake-subscriber). 
+ Please make sure `fake-subscriber` image is built and available in your cluster. Dockerfile to build image for `fake-subscriber` is under [gatekeeper/test/export/fake-subscriber](https://github.com/open-policy-agent/gatekeeper/tree/master/test/export/fake-subscriber). ::: -#### Configure Gatekeeper with Export enabled +#### Configure Gatekeeper with Export enabled with Dapr 1. Create Gatekeeper namespace, and create Dapr pubsub component and Redis secret in Gatekeeper's namespace (`gatekeeper-system` by default). Please make sure to update `gatekeeper-system` namespace for the next steps if your cluster's Gatekeeper namespace is different. @@ -209,6 +211,91 @@ Dapr: https://dapr.io/ 2023/07/18 20:37:20 main.ExportMsg{ID:"2023-07-18T20:37:19Z", Details:map[string]interface {}{"missing_labels":[]interface {}{"test"}}, EventType:"violation_audited", Group:"constraints.gatekeeper.sh", Version:"v1beta1", Kind:"K8sRequiredLabels", Name:"pod-must-have-test", Namespace:"", Message:"you must provide labels: {\"test\"}", EnforcementAction:"deny", ConstraintAnnotations:map[string]string(nil), ResourceGroup:"", ResourceAPIVersion:"v1", ResourceKind:"Pod", ResourceNamespace:"nginx", ResourceName:"nginx-deployment-58899467f5-j85bs", ResourceLabels:map[string]string{"app":"nginx", "owner":"admin", "pod-template-hash":"58899467f5"}} ``` +### Quick start with exporting violations on node storage using Disk driver via emptyDir + +#### Configure Gatekeeper with Export enabled to Disk + +1. Deploy Gatekeeper with disk export configurations. 
+ + Below are the default configurations that enable disk export and add a sidecar container to the Gatekeeper audit pod: + + ```yaml + audit: + exportVolume: + name: tmp-violations + emptyDir: {} + exportVolumeMount: + path: /tmp/violations + exportSidecar: + name: reader + image: ghcr.io/open-policy-agent/fake-reader:latest + imagePullPolicy: Always + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 999 + runAsNonRoot: true + runAsUser: 1000 + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /tmp/violations + name: tmp-violations + ``` + + :::warning + The reader sidecar image `ghcr.io/open-policy-agent/fake-reader:latest` and the provided default configurations are intended for demonstration and quickstart purposes only. They are not recommended for production environments. For production use, it is advised to create and configure a custom sidecar image tailored to your specific requirements. + ::: + + ```shell + helm upgrade --install gatekeeper gatekeeper/gatekeeper --namespace gatekeeper-system \ + --set enableViolationExport=true \ + --set audit.connection=audit-connection \ + --set audit.channel=audit-channel \ + --set audit.exportConfig.maxAuditResults=3 \ + --set exportBackend=disk \ + ``` + + **Note**: After the audit pod starts, verify that it contains two running containers. + + ```shell + kubectl get pod -n gatekeeper-system + NAME READY STATUS RESTARTS AGE + gatekeeper-audit-6865f5f56d-vclxw 2/2 Running 0 12s + ``` + + :::tip + The command above deploys the audit pod with a default sidecar reader and volume. To customize the sidecar reader or volume according to your requirements, you can set the following variables in your values.yaml file: + + ```yaml + audit: + exportVolume: + + exportVolumeMount: + path: + exportSidecar: + + ``` + ::: + +2. Create the constraint templates and constraints, and make sure audit ran by checking constraints. 
If constraint status is updated with information such as `auditTimeStamp` or `totalViolations`, then audit has run at least once. Additionally, a populated `TOTAL-VIOLATIONS` field for all constraints while listing constraints also indicates that audit has run at least once.
+
+   ```log
+   kubectl get constraint
+   NAME                 ENFORCEMENT-ACTION   TOTAL-VIOLATIONS
+   pod-must-have-test                        0
+   ```
+
+3. Finally, check the sidecar reader logs to see the violations written.
+
+   ```log
+   kubectl logs -l gatekeeper.sh/operation=audit -c reader -n gatekeeper-system
+   2025/03/05 00:37:16 {"id":"2025-03-05T00:37:13Z","details":{"missing_labels":["test"]},"eventType":"violation_audited","group":"constraints.gatekeeper.sh","version":"v1beta1","kind":"K8sRequiredLabels","name":"pod-must-have-test","message":"you must provide labels: {\"test\"}","enforcementAction":"deny","resourceAPIVersion":"v1","resourceKind":"Pod","resourceNamespace":"nginx","resourceName":"nginx-deployment-2-79479fc6db-7qbnm","resourceLabels":{"app":"nginx-ingress","app.kubernetes.io/component":"controller","pod-template-hash":"79479fc6db"}}
+   ```
+
 ### Violations

 The audit pod exports violations in following format: