-
Notifications
You must be signed in to change notification settings - Fork 173
Open
Labels
Description
Terraform Enterprise version
v202503-1
Terraform version
1.10.5
Terraform Configuration Files
# Pin the core version and declare every provider requirement explicitly
# so `terraform init` resolves identical providers everywhere.
terraform {
  required_version = ">= 1.5.7"

  backend "local" {}

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = ">= 5.55.0"
    }
    tfe = {
      # Explicit source for consistency with the aws entry; without it
      # Terraform falls back to the implied hashicorp/tfe address.
      source  = "hashicorp/tfe"
      version = "~> 0.64.0"
    }
  }
}
# AWS provider for the account where the test bucket below is created.
provider "aws" {
region = "us-east-1"
# Tag keys listed here are ignored on read and write, so externally
# managed tags do not produce perpetual diffs.
ignore_tags {
keys = ["REDACTED"]
}
}
# Terraform Enterprise provider. `organization` set here acts as the
# default org for tfe_* resources that do not specify one themselves.
# NOTE(review): token category (org vs. team token) matters for the
# workspace-run resources — see the issue discussion below.
provider "tfe" {
hostname = "REDACTED"
token = "REDACTED"
organization = "REDACTED"
}
# Current AWS region; used to suffix the bucket name below.
data "aws_region" "current" {}
# Per-region test bucket. The commented-out lines are remnants of a
# multi-account / multi-region variant of this resource and are kept
# byte-identical for reference.
resource "aws_s3_bucket" "this" {
#count = local.innovate_lab_account ? 1 : 0
#provider = local.region_providers[each.value]
bucket = "REDACTED-test-bucket-2390-${data.aws_region.current.name}"
#force_destroy = var.force_destroy
#tags = local.tags
}
# Workspace backed by the `base-infra` directory of the VCS repo.
# Runs are queued explicitly (queue_all_runs = false) rather than on
# every VCS push.
resource "tfe_workspace" "base_infra" {
  name              = "base-infra-workspace"
  organization      = "REDACTED"
  working_directory = "base-infra"
  queue_all_runs    = false
  tag_names         = ["test", "base_infra"]

  vcs_repo {
    identifier     = "REDACTED/aws-REDACTED-deployment"
    branch         = "master"
    oauth_token_id = "REDACTED"
  }
}
# Injects TF_CLI_ARGS into the base-infra workspace so every run picks
# up the environment-specific tfvars file.
resource "tfe_variable" "base_infra" {
  workspace_id = tfe_workspace.base_infra.id
  category     = "terraform"
  key          = "TF_CLI_ARGS"
  value        = "-var-file=environments/base-infraacc1.tfvars"
  description  = "using tfvars file"
}
# Starts a run in the base-infra workspace when this config is applied,
# and a destroy run when it is destroyed; both auto-confirm (no manual
# approval) and retry on failure with the backoffs below.
resource "tfe_workspace_run" "ws_run_base_infra" {
workspace_id = tfe_workspace.base_infra.id
apply {
manual_confirm = false
wait_for_run = true
retry_attempts = 5
retry_backoff_min = 5
}
destroy {
manual_confirm = false
wait_for_run = true
retry_attempts = 3
retry_backoff_min = 10
}
}
### App Infra
# Workspace backed by the `app-infra` directory of the same VCS repo;
# runs are queued explicitly rather than on every push.
resource "tfe_workspace" "app_infra" {
  name              = "app-infra-workspace"
  organization      = "REDACTED"
  working_directory = "app-infra"
  queue_all_runs    = false
  tag_names         = ["test", "app_infra"]

  vcs_repo {
    identifier     = "REDACTED/aws-REDACTED-deployment"
    branch         = "master"
    oauth_token_id = "REDACTED"
  }
}
# Injects TF_CLI_ARGS into the app-infra workspace so every run picks
# up the environment-specific tfvars file.
resource "tfe_variable" "app_infra" {
  workspace_id = tfe_workspace.app_infra.id
  category     = "terraform"
  key          = "TF_CLI_ARGS"
  value        = "-var-file=environments/app-infraacc1.tfvars"
  description  = "using tfvars file"
}
# Runs the app-infra workspace, but only after the base-infra run has
# completed (depends_on). Auto-confirms and retries like the base run.
resource "tfe_workspace_run" "ws_run_app_infra" {
  workspace_id = tfe_workspace.app_infra.id
  depends_on   = [tfe_workspace_run.ws_run_base_infra]

  apply {
    manual_confirm = false
    # Explicit wait_for_run for consistency with ws_run_base_infra
    # (true is also the provider default, so behavior is unchanged).
    wait_for_run      = true
    retry_attempts    = 5
    retry_backoff_min = 5
  }

  destroy {
    manual_confirm    = false
    wait_for_run      = true
    retry_attempts    = 3
    retry_backoff_min = 10
  }
}
### Initial run to Base Infra
resource "tfe_run_trigger" "main_2_baseInfra" {
  # Fix: per the resource name (main -> base infra) this trigger should
  # target the base_infra workspace; it previously pointed at app_infra,
  # duplicating the target of baseinfra_2_appInfra below.
  workspace_id  = tfe_workspace.base_infra.id
  sourceable_id = "REDACTED" # the workspace running this code, for reference
}
### Base Infra to APP Infra
# Queues a run in the app_infra workspace whenever a run in the
# base_infra workspace completes.
resource "tfe_run_trigger" "baseinfra_2_appInfra" {
workspace_id = tfe_workspace.app_infra.id
sourceable_id = tfe_workspace.base_infra.id
}
Debug Output
Please see https://gist.github.com/nphilbrook/f715c8e0771bdad64fedde86e2cdd69a
Expected Behavior
Lack of panic
Actual Behavior
panic during plan. See gist above for full output. Just the stack trace here:
Stack trace from the terraform-provider-tfe_v0.64.0_x5 plugin:
panic: runtime error: invalid memory address or nil pointer dereference
[signal SIGSEGV: segmentation violation code=0x1 addr=0x0 pc=0xd79c50]
goroutine 157 [running]:
github.com/hashicorp/terraform-provider-tfe/internal/provider.resourceTFERunTriggerRead(0xc00042f300, {0xf6c1c0?, 0xc000420420?})
github.com/hashicorp/terraform-provider-tfe/internal/provider/resource_tfe_run_trigger.go:97 +0x510
github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema.(*Resource).read(0x11cec58?, {0x11cec58?, 0xc00087ef30?}, 0xd?, {0xf6c1c0?, 0xc000420420?})
github.com/hashicorp/terraform-plugin-sdk/v2@v2.35.0/helper/schema/resource.go:811 +0x15f
github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema.(*Resource).RefreshWithoutUpgrade(0xc000301960, {0x11cec58, 0xc00087ef30}, 0xc000679a00, {0xf6c1c0, 0xc000420420})
github.com/hashicorp/terraform-plugin-sdk/v2@v2.35.0/helper/schema/resource.go:1117 +0x529
github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema.(*GRPCProviderServer).ReadResource(0xc000011a28, {0x11cec58?, 0xc00087ee40?}, 0xc000139640)
github.com/hashicorp/terraform-plugin-sdk/v2@v2.35.0/helper/schema/grpc_provider.go:710 +0x6c5
github.com/hashicorp/terraform-plugin-mux/tf5muxserver.(*muxServer).ReadResource(0xc0002aa700, {0x11cec58?, 0xc00087eb70?}, 0xc000139640)
github.com/hashicorp/terraform-plugin-mux@v0.18.0/tf5muxserver/mux_server_ReadResource.go:35 +0x196
github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server.(*server).ReadResource(0xc0001260a0, {0x11cec58?, 0xc00023a9c0?}, 0xc000022bd0)
github.com/hashicorp/terraform-plugin-go@v0.26.0/tfprotov5/tf5server/server.go:784 +0x30c
github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5._Provider_ReadResource_Handler({0xffeaa0, 0xc0001260a0}, {0x11cec58, 0xc00023a9c0}, 0xc00042f080, 0x0)
github.com/hashicorp/terraform-plugin-go@v0.26.0/tfprotov5/internal/tfplugin5/tfplugin5_grpc.pb.go:575 +0x1a9
google.golang.org/grpc.(*Server).processUnaryRPC(0xc000176000, {0x11cec58, 0xc00023a930}, 0xc0004b78c0, 0xc00011ad20, 0x18ac728, 0x0)
google.golang.org/grpc@v1.70.0/server.go:1400 +0x103b
google.golang.org/grpc.(*Server).handleStream(0xc000176000, {0x11cf980, 0xc000304d00}, 0xc0004b78c0)
google.golang.org/grpc@v1.70.0/server.go:1810 +0xbaa
google.golang.org/grpc.(*Server).serveStreams.func2.1()
google.golang.org/grpc@v1.70.0/server.go:1030 +0x8b
created by google.golang.org/grpc.(*Server).serveStreams.func2 in goroutine 7
google.golang.org/grpc@v1.70.0/server.go:1041 +0x125
Error: The terraform-provider-tfe_v0.64.0_x5 plugin crashed!
This is always indicative of a bug within the plugin. It would be immensely
helpful if you could report the crash with the plugin's maintainers so that it
can be fixed. The output above should help diagnose the issue.
2025-04-15T16:52:03.391Z [DEBUG] provider: plugin exited
Operation failed: failed running terraform plan (exit 1)%
Additional Context
State file as it existed prior to this run: https://gist.github.com/nphilbrook/18d8b841f61106925ef98bae5770e915
All runs prior to this run were done with an organization token, which was able to create the workspaces, variables, etc., but failed to create the tfe_workspace_runs that you can see in the configuration. This failed run was done with a team token to try to create the tfe_workspace_run resources, but we got the panic on plan instead.
andybaran