diff --git a/src/deploy/functions/backend.ts b/src/deploy/functions/backend.ts index 36de5eae317..ad8799f0f57 100644 --- a/src/deploy/functions/backend.ts +++ b/src/deploy/functions/backend.ts @@ -300,7 +300,7 @@ export interface ServiceConfiguration { serviceAccount?: string | null; } -export type FunctionsPlatform = "gcfv1" | "gcfv2"; +export type FunctionsPlatform = "gcfv1" | "gcfv2" | "run"; -export const AllFunctionsPlatforms: FunctionsPlatform[] = ["gcfv1", "gcfv2"]; +export const AllFunctionsPlatforms: FunctionsPlatform[] = ["gcfv1", "gcfv2", "run"]; export type Triggered = diff --git a/src/deploy/functions/build.ts b/src/deploy/functions/build.ts index 9fca9b58d09..5e6565294d4 100644 --- a/src/deploy/functions/build.ts +++ b/src/deploy/functions/build.ts @@ -208,7 +208,8 @@ export interface SecretEnvVar { export type MemoryOption = 128 | 256 | 512 | 1024 | 2048 | 4096 | 8192 | 16384 | 32768; const allMemoryOptions: MemoryOption[] = [128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768]; -export type FunctionsPlatform = backend.FunctionsPlatform; +// Run is an automatic migration from gcfv2 and is not used on the wire. +export type FunctionsPlatform = Exclude<backend.FunctionsPlatform, "run">; export const AllFunctionsPlatforms: FunctionsPlatform[] = ["gcfv1", "gcfv2"]; export type VpcEgressSetting = backend.VpcEgressSettings; export const AllVpcEgressSettings: VpcEgressSetting[] = ["PRIVATE_RANGES_ONLY", "ALL_TRAFFIC"]; diff --git a/src/deploy/functions/ensure.ts b/src/deploy/functions/ensure.ts index 0efd0f979ae..5828d461835 100644 --- a/src/deploy/functions/ensure.ts +++ b/src/deploy/functions/ensure.ts @@ -28,7 +28,7 @@ export async function defaultServiceAccount(e: backend.Endpoint): Promise> = { gcfv1: { 1: { ram: 0, cpu: 0 }, 2: { ram: 0, cpu: 0 } }, gcfv2: { 1: { ram: 0, cpu: 0 }, 2: { ram: 0, cpu: 0 } }, + run: { 1: { ram: 0, cpu: 0 }, 2: { ram: 0, cpu: 0 } }, }; for (const endpoint of endpoints) { @@ -188,10 +195,10 @@ export function monthlyMinInstanceCost(endpoints: backend.Endpoint[]): number { } else { // V2 is currently fixed at 1vCPU. 
const tier = V2_REGION_TO_TIER[endpoint.region]; - usage["gcfv2"][tier].ram = - usage["gcfv2"][tier].ram + ramGb * SECONDS_PER_MONTH * endpoint.minInstances; - usage["gcfv2"][tier].cpu = - usage["gcfv2"][tier].cpu + + usage[endpoint.platform][tier].ram = + usage[endpoint.platform][tier].ram + ramGb * SECONDS_PER_MONTH * endpoint.minInstances; + usage[endpoint.platform][tier].cpu = + usage[endpoint.platform][tier].cpu + (endpoint.cpu as number) * SECONDS_PER_MONTH * endpoint.minInstances; } } @@ -218,5 +225,15 @@ export function monthlyMinInstanceCost(endpoints: backend.Endpoint[]): number { v2CpuBill -= V2_FREE_TIER.vCpu * V2_RATES.vCpu[1]; v2CpuBill = Math.max(v2CpuBill, 0); - return v1MemoryBill + v1CpuBill + v2MemoryBill + v2CpuBill; + let runMemoryBill = + usage["run"][1].ram * V2_RATES.memoryGb[1] + usage["run"][2].ram * V2_RATES.memoryGb[2]; + runMemoryBill -= V2_FREE_TIER.memoryGb * V2_RATES.memoryGb[1]; + runMemoryBill = Math.max(runMemoryBill, 0); + + let runCpuBill = + usage["run"][1].cpu * V2_RATES.idleVCpu[1] + usage["run"][2].cpu * V2_RATES.idleVCpu[2]; + runCpuBill -= V2_FREE_TIER.vCpu * V2_RATES.vCpu[1]; + runCpuBill = Math.max(runCpuBill, 0); + + return v1MemoryBill + v1CpuBill + v2MemoryBill + v2CpuBill + runMemoryBill + runCpuBill; } diff --git a/src/deploy/functions/release/fabricator.ts b/src/deploy/functions/release/fabricator.ts index 7ebc4cf78ed..468ee09a08c 100644 --- a/src/deploy/functions/release/fabricator.ts +++ b/src/deploy/functions/release/fabricator.ts @@ -183,6 +183,10 @@ export class Fabricator { await this.createV1Function(endpoint, scraperV1); } else if (endpoint.platform === "gcfv2") { await this.createV2Function(endpoint, scraperV2); + } else if (endpoint.platform === "run") { + throw new FirebaseError("Creating new Cloud Run functions is not supported yet.", { + exit: 1, + }); } else { assertExhaustive(endpoint.platform); } @@ -206,6 +210,8 @@ export class Fabricator { await this.updateV1Function(update.endpoint, scraperV1); } 
else if (update.endpoint.platform === "gcfv2") { await this.updateV2Function(update.endpoint, scraperV2); + } else if (update.endpoint.platform === "run") { + throw new FirebaseError("Updating Cloud Run functions is not supported yet.", { exit: 1 }); } else { assertExhaustive(update.endpoint.platform); } @@ -216,10 +222,13 @@ export class Fabricator { async deleteEndpoint(endpoint: backend.Endpoint): Promise { await this.deleteTrigger(endpoint); if (endpoint.platform === "gcfv1") { - await this.deleteV1Function(endpoint); - } else { - await this.deleteV2Function(endpoint); + return this.deleteV1Function(endpoint); + } else if (endpoint.platform === "gcfv2") { + return this.deleteV2Function(endpoint); + } else if (endpoint.platform === "run") { + throw new FirebaseError("Deleting Cloud Run functions is not supported yet.", { exit: 1 }); } + assertExhaustive(endpoint.platform); } async createV1Function(endpoint: backend.Endpoint, scraper: SourceTokenScraper): Promise { @@ -623,6 +632,11 @@ export class Fabricator { // Set/Delete trigger is responsible for wiring up a function with any trigger not owned // by the GCF API. This includes schedules, task queues, and blocking function triggers. 
async setTrigger(endpoint: backend.Endpoint): Promise { + if (endpoint.platform === "run") { + throw new FirebaseError("Setting triggers for Cloud Run functions is not supported yet.", { + exit: 1, + }); + } if (backend.isScheduleTriggered(endpoint)) { if (endpoint.platform === "gcfv1") { await this.upsertScheduleV1(endpoint); @@ -640,6 +654,11 @@ export class Fabricator { } async deleteTrigger(endpoint: backend.Endpoint): Promise { + if (endpoint.platform === "run") { + throw new FirebaseError("Deleting triggers for Cloud Run functions is not supported yet.", { + exit: 1, + }); + } if (backend.isScheduleTriggered(endpoint)) { if (endpoint.platform === "gcfv1") { await this.deleteScheduleV1(endpoint); diff --git a/src/experiments.ts b/src/experiments.ts index 6027b9d5d3a..c2c286bed6d 100644 --- a/src/experiments.ts +++ b/src/experiments.ts @@ -57,6 +57,11 @@ export const ALL_EXPERIMENTS = experiments({ "of how that image was created.", public: true, }, + runfunctions: { + shortDescription: + "Functions created using the V2 API target Cloud Run Functions (not production ready)", + public: false, + }, dangerouslyAllowFunctionsConfig: { shortDescription: "Allows the use of deprecated functions.config() API", fullDescription: diff --git a/src/functions/secrets.ts b/src/functions/secrets.ts index a0cea57f300..9b58641ed8a 100644 --- a/src/functions/secrets.ts +++ b/src/functions/secrets.ts @@ -359,6 +359,10 @@ export async function updateEndpointSecret( operationResourceName: op.name, }); return gcfV2.endpointFromFunction(cfn); + } else if (endpoint.platform === "run") { + // This may be tricky because the image has been deleted. How does this work + // with GCF? 
+ throw new FirebaseError("Updating Cloud Run functions is not yet implemented."); } else { assertExhaustive(endpoint.platform); } diff --git a/src/gcp/cloudfunctionsv2.spec.ts b/src/gcp/cloudfunctionsv2.spec.ts index ffc0ac5c255..46fae53ceaf 100644 --- a/src/gcp/cloudfunctionsv2.spec.ts +++ b/src/gcp/cloudfunctionsv2.spec.ts @@ -62,30 +62,6 @@ describe("cloudfunctionsv2", () => { updateTime: new Date(), }; - describe("megabytes", () => { - enum Bytes { - KB = 1e3, - MB = 1e6, - GB = 1e9, - KiB = 1 << 10, - MiB = 1 << 20, - GiB = 1 << 30, - } - it("Should handle decimal SI units", () => { - expect(cloudfunctionsv2.mebibytes("1000k")).to.equal((1000 * Bytes.KB) / Bytes.MiB); - expect(cloudfunctionsv2.mebibytes("1.5M")).to.equal((1.5 * Bytes.MB) / Bytes.MiB); - expect(cloudfunctionsv2.mebibytes("1G")).to.equal(Bytes.GB / Bytes.MiB); - }); - it("Should handle binary SI units", () => { - expect(cloudfunctionsv2.mebibytes("1Mi")).to.equal(Bytes.MiB / Bytes.MiB); - expect(cloudfunctionsv2.mebibytes("1Gi")).to.equal(Bytes.GiB / Bytes.MiB); - }); - it("Should handle no unit", () => { - expect(cloudfunctionsv2.mebibytes("100000")).to.equal(100000 / Bytes.MiB); - expect(cloudfunctionsv2.mebibytes("1e9")).to.equal(1e9 / Bytes.MiB); - expect(cloudfunctionsv2.mebibytes("1.5E6")).to.equal((1.5 * 1e6) / Bytes.MiB); - }); - }); describe("functionFromEndpoint", () => { it("should guard against version mixing", () => { expect(() => { diff --git a/src/gcp/cloudfunctionsv2.ts b/src/gcp/cloudfunctionsv2.ts index 24669f0d61c..60d230f7d24 100644 --- a/src/gcp/cloudfunctionsv2.ts +++ b/src/gcp/cloudfunctionsv2.ts @@ -17,6 +17,7 @@ import { HASH_LABEL, } from "../functions/constants"; import { RequireKeys } from "../metaprogramming"; +import { mebibytes } from "./k8s"; import { captureRuntimeValidationError } from "./cloudfunctions"; export const API_VERSION = "v2"; @@ -207,43 +208,6 @@ interface GenerateUploadUrlResponse { storageSource: StorageSource; } -// AvailableMemory suffixes and 
their byte count. -type MemoryUnit = "" | "k" | "M" | "G" | "T" | "Ki" | "Mi" | "Gi" | "Ti"; -const BYTES_PER_UNIT: Record = { - "": 1, - k: 1e3, - M: 1e6, - G: 1e9, - T: 1e12, - Ki: 1 << 10, - Mi: 1 << 20, - Gi: 1 << 30, - Ti: 1 << 40, -}; - -/** - * Returns the float-precision number of Mega(not Mebi)bytes in a - * Kubernetes-style quantity - * Must serve the same results as - * https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go - */ -export function mebibytes(memory: string): number { - const re = /^([0-9]+(\.[0-9]*)?)(Ki|Mi|Gi|Ti|k|M|G|T|([eE]([0-9]+)))?$/; - const matches = re.exec(memory); - if (!matches) { - throw new Error(`Invalid memory quantity "${memory}""`); - } - const quantity = Number.parseFloat(matches[1]); - let bytes: number; - if (matches[5]) { - bytes = quantity * Math.pow(10, Number.parseFloat(matches[5])); - } else { - const suffix = matches[3] || ""; - bytes = quantity * BYTES_PER_UNIT[suffix as MemoryUnit]; - } - return bytes / (1 << 20); -} - /** * Logs an error from a failed function deployment. * @param func The function that was unsuccessfully deployed. 
diff --git a/src/gcp/cloudscheduler.ts b/src/gcp/cloudscheduler.ts index 89bf58a900c..8682a021117 100644 --- a/src/gcp/cloudscheduler.ts +++ b/src/gcp/cloudscheduler.ts @@ -239,7 +239,7 @@ export async function jobFromEndpoint( scheduled: "true", }, }; - } else if (endpoint.platform === "gcfv2") { + } else if (endpoint.platform === "gcfv2" || endpoint.platform === "run") { job.timeZone = endpoint.scheduleTrigger.timeZone || DEFAULT_TIME_ZONE_V2; job.httpTarget = { uri: endpoint.uri!, diff --git a/src/gcp/k8s.spec.ts b/src/gcp/k8s.spec.ts new file mode 100644 index 00000000000..667b67b484d --- /dev/null +++ b/src/gcp/k8s.spec.ts @@ -0,0 +1,30 @@ +import { expect } from "chai"; +import * as k8s from "./k8s"; + +describe("megabytes", () => { + enum Bytes { + KB = 1e3, + MB = 1e6, + GB = 1e9, + KiB = 1 << 10, + MiB = 1 << 20, + GiB = 1 << 30, + } + + it("Should handle decimal SI units", () => { + expect(k8s.mebibytes("1000k")).to.equal((1000 * Bytes.KB) / Bytes.MiB); + expect(k8s.mebibytes("1.5M")).to.equal((1.5 * Bytes.MB) / Bytes.MiB); + expect(k8s.mebibytes("1G")).to.equal(Bytes.GB / Bytes.MiB); + }); + + it("Should handle binary SI units", () => { + expect(k8s.mebibytes("1Mi")).to.equal(Bytes.MiB / Bytes.MiB); + expect(k8s.mebibytes("1Gi")).to.equal(Bytes.GiB / Bytes.MiB); + }); + + it("Should handle no unit", () => { + expect(k8s.mebibytes("100000")).to.equal(100000 / Bytes.MiB); + expect(k8s.mebibytes("1e9")).to.equal(1e9 / Bytes.MiB); + expect(k8s.mebibytes("1.5E6")).to.equal((1.5 * 1e6) / Bytes.MiB); + }); +}); diff --git a/src/gcp/k8s.ts b/src/gcp/k8s.ts new file mode 100644 index 00000000000..338e7184120 --- /dev/null +++ b/src/gcp/k8s.ts @@ -0,0 +1,69 @@ +// AvailableMemory suffixes and their byte count. 
+type MemoryUnit = "" | "k" | "M" | "G" | "T" | "Ki" | "Mi" | "Gi" | "Ti"; +const BYTES_PER_UNIT: Record = { + "": 1, + k: 1e3, + M: 1e6, + G: 1e9, + T: 1e12, + Ki: 1 << 10, + Mi: 1 << 20, + Gi: 1 << 30, + Ti: 1 << 40, +}; +/** + * Returns the float-precision number of Mebi(not Mega)bytes in a + * Kubernetes-style quantity + * Must serve the same results as + * https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go + */ + +export function mebibytes(memory: string): number { + const re = /^([0-9]+(\.[0-9]*)?)(Ki|Mi|Gi|Ti|k|M|G|T|([eE]([0-9]+)))?$/; + const matches = re.exec(memory); + if (!matches) { + throw new Error(`Invalid memory quantity "${memory}""`); + } + const quantity = Number.parseFloat(matches[1]); + let bytes: number; + if (matches[5]) { + bytes = quantity * Math.pow(10, Number.parseFloat(matches[5])); + } else { + const suffix = matches[3] || ""; + bytes = quantity * BYTES_PER_UNIT[suffix as MemoryUnit]; + } + return bytes / (1 << 20); +} + +export interface PlaintextEnvVar { + name: string; + value: string; +} + +export interface SecretEnvVar { + name: string; + valueSource: { + secretKeyRef: { + secret: string; // Secret name + version?: string; // Optional version, defaults to latest + }; + }; +} + +export type EnvVar = PlaintextEnvVar | SecretEnvVar; + +export type ResourceType = "cpu" | "memory" | "nvidia.com/gpu"; + +export interface Container { + name?: string; + image: string; + command?: string[]; + args?: string[]; + env: EnvVar[]; + workingDir?: string; + resources: { + limits: Record; + }; + cpuIdle?: boolean; + startupCpuBoost?: boolean; +} diff --git a/src/gcp/runv2.spec.ts b/src/gcp/runv2.spec.ts new file mode 100644 index 00000000000..c6c65fb103a --- /dev/null +++ b/src/gcp/runv2.spec.ts @@ -0,0 +1,413 @@ +import { expect } from "chai"; + +import * as runv2 from "./runv2"; +import * as backend from "../deploy/functions/backend"; +import { latest } from 
"../deploy/functions/runtimes/supported"; +import { CODEBASE_LABEL } from "../functions/constants"; + +describe("runv2", () => { + const PROJECT_ID = "project-id"; + const LOCATION = "us-central1"; + const SERVICE_ID = "functionid"; // TODO: use other normalization method if/when implemented. + const FUNCTION_ID = "functionId"; // Logical function ID + const IMAGE_URI = "gcr.io/project/image:latest"; + + const BASE_ENDPOINT_RUN: Omit = { + platform: "run", + id: FUNCTION_ID, + project: PROJECT_ID, + region: LOCATION, + entryPoint: FUNCTION_ID, + runtime: latest("nodejs"), + availableMemoryMb: 256, + cpu: 1, + }; + + const BASE_RUN_SERVICE: Omit = { + name: `projects/${PROJECT_ID}/locations/${LOCATION}/services/${SERVICE_ID}`, + labels: { + [runv2.RUNTIME_LABEL]: latest("nodejs"), + [runv2.CLIENT_NAME_LABEL]: "firebase-functions", + }, + annotations: { + [runv2.CLIENT_NAME_ANNOTATION]: "cli-firebase", + [runv2.FUNCTION_TARGET_ANNOTATION]: FUNCTION_ID, + [runv2.FUNCTION_ID_ANNOTATION]: FUNCTION_ID, + [runv2.CPU_BOOST_ANNOTATION]: "true", + }, + template: { + containers: [ + { + name: runv2.DEFAULT_FUNCTION_CONTAINER_NAME, + image: IMAGE_URI, + env: [], + resources: { + limits: { + cpu: "1", + memory: "256Mi", + }, + startupCpuBoost: true, + }, + }, + ], + containerConcurrency: backend.DEFAULT_CONCURRENCY, + }, + }; + + describe("serviceFromEndpoint", () => { + it("should copy a minimal endpoint", () => { + const endpoint: backend.Endpoint = { + ...BASE_ENDPOINT_RUN, + httpsTrigger: {}, + }; + + expect(runv2.serviceFromEndpoint(endpoint, IMAGE_URI)).to.deep.equal(BASE_RUN_SERVICE); + }); + + it("should handle different codebase", () => { + const endpoint: backend.Endpoint = { + ...BASE_ENDPOINT_RUN, + codebase: "my-codebase", + httpsTrigger: {}, + }; + const expectedServiceInput: Omit = { + ...BASE_RUN_SERVICE, + name: `projects/${PROJECT_ID}/locations/${LOCATION}/services/${FUNCTION_ID.toLowerCase()}`, + labels: { + ...BASE_RUN_SERVICE.labels, + [CODEBASE_LABEL]: 
"my-codebase", + }, + }; + expect(runv2.serviceFromEndpoint(endpoint, IMAGE_URI)).to.deep.equal(expectedServiceInput); + }); + + it("should copy environment variables", () => { + const endpoint: backend.Endpoint = { + ...BASE_ENDPOINT_RUN, + httpsTrigger: {}, + environmentVariables: { FOO: "bar" }, + }; + const expectedServiceInput = JSON.parse( + JSON.stringify({ + ...BASE_RUN_SERVICE, + name: `projects/${PROJECT_ID}/locations/${LOCATION}/services/${FUNCTION_ID.toLowerCase()}`, + }), + ); + expectedServiceInput.template.containers[0].env = [{ name: "FOO", value: "bar" }]; + + expect(runv2.serviceFromEndpoint(endpoint, IMAGE_URI)).to.deep.equal(expectedServiceInput); + }); + + it("should copy secret environment variables", () => { + const endpoint: backend.Endpoint = { + ...BASE_ENDPOINT_RUN, + httpsTrigger: {}, + secretEnvironmentVariables: [ + { key: "MY_SECRET", secret: "secret-name", projectId: PROJECT_ID, version: "1" }, + ], + }; + const expectedServiceInput = JSON.parse( + JSON.stringify({ + ...BASE_RUN_SERVICE, + name: `projects/${PROJECT_ID}/locations/${LOCATION}/services/${FUNCTION_ID.toLowerCase()}`, + }), + ); + expectedServiceInput.template.containers[0].env = [ + { + name: "MY_SECRET", + valueSource: { secretKeyRef: { secret: "secret-name", version: "1" } }, + }, + ]; + expect(runv2.serviceFromEndpoint(endpoint, IMAGE_URI)).to.deep.equal(expectedServiceInput); + }); + + it("should set min/max instances annotations", () => { + const endpoint: backend.Endpoint = { + ...BASE_ENDPOINT_RUN, + httpsTrigger: {}, + minInstances: 1, + maxInstances: 10, + }; + const expectedServiceInput = JSON.parse( + JSON.stringify({ + ...BASE_RUN_SERVICE, + name: `projects/${PROJECT_ID}/locations/${LOCATION}/services/${FUNCTION_ID.toLowerCase()}`, + }), + ); + expectedServiceInput.annotations[runv2.MIN_INSTANCES_ANNOTATION] = "1"; + expectedServiceInput.annotations[runv2.MAX_INSTANCES_ANNOTATION] = "10"; + + expect(runv2.serviceFromEndpoint(endpoint, 
IMAGE_URI)).to.deep.equal(expectedServiceInput); + }); + + it("should set concurrency", () => { + const endpoint: backend.Endpoint = { + ...BASE_ENDPOINT_RUN, + httpsTrigger: {}, + concurrency: 50, + }; + const expectedServiceInput = JSON.parse( + JSON.stringify({ + ...BASE_RUN_SERVICE, + name: `projects/${PROJECT_ID}/locations/${LOCATION}/services/${FUNCTION_ID.toLowerCase()}`, + }), + ); + expectedServiceInput.template.containerConcurrency = 50; + + expect(runv2.serviceFromEndpoint(endpoint, IMAGE_URI)).to.deep.equal(expectedServiceInput); + }); + + it("should set memory and CPU", () => { + const endpoint: backend.Endpoint = { + ...BASE_ENDPOINT_RUN, + httpsTrigger: {}, + availableMemoryMb: 512, + cpu: 2, + }; + const expectedServiceInput = JSON.parse( + JSON.stringify({ + ...BASE_RUN_SERVICE, + name: `projects/${PROJECT_ID}/locations/${LOCATION}/services/${FUNCTION_ID.toLowerCase()}`, + }), + ); + expectedServiceInput.template.containers[0].resources.limits.memory = "512Mi"; + expectedServiceInput.template.containers[0].resources.limits.cpu = "2"; + + expect(runv2.serviceFromEndpoint(endpoint, IMAGE_URI)).to.deep.equal(expectedServiceInput); + }); + + it("should remove deployment-tool label", () => { + const endpoint: backend.Endpoint = { + ...BASE_ENDPOINT_RUN, + httpsTrigger: {}, + labels: { "deployment-tool": "firebase-cli" }, + }; + const result = runv2.serviceFromEndpoint(endpoint, IMAGE_URI); + expect(result.labels?.["deployment-tool"]).to.be.undefined; + expect(result.labels?.[runv2.CLIENT_NAME_LABEL]).to.equal("firebase-functions"); + }); + }); + + describe("endpointFromService", () => { + it("should copy a minimal service", () => { + const service: Omit = { + ...BASE_RUN_SERVICE, + name: `projects/${PROJECT_ID}/locations/${LOCATION}/services/${SERVICE_ID}`, + labels: { + [runv2.RUNTIME_LABEL]: latest("nodejs"), + }, + annotations: { + [runv2.FUNCTION_ID_ANNOTATION]: FUNCTION_ID, // Using FUNCTION_ID_ANNOTATION as primary source for id + 
[runv2.FUNCTION_TARGET_ANNOTATION]: "customEntryPoint", + }, + template: { + containers: [ + { + name: runv2.DEFAULT_FUNCTION_CONTAINER_NAME, + image: IMAGE_URI, + resources: { + limits: { + cpu: "1", + memory: "256Mi", + }, + }, + }, + ], + }, + }; + + const expectedEndpoint: backend.Endpoint = { + platform: "run", + id: FUNCTION_ID, + project: PROJECT_ID, + region: LOCATION, + runtime: latest("nodejs"), + entryPoint: "customEntryPoint", + availableMemoryMb: 256, + cpu: 1, + httpsTrigger: {}, + labels: { + [runv2.RUNTIME_LABEL]: latest("nodejs"), + }, + environmentVariables: {}, + secretEnvironmentVariables: [], + }; + + expect(runv2.endpointFromService(service)).to.deep.equal(expectedEndpoint); + }); + + it("should detect a service that's GCF managed", () => { + const service: Omit = { + ...BASE_RUN_SERVICE, + name: `projects/${PROJECT_ID}/locations/${LOCATION}/services/${SERVICE_ID}`, + labels: { + [runv2.RUNTIME_LABEL]: latest("nodejs"), + [runv2.CLIENT_NAME_LABEL]: "cloud-functions", // This indicates it's GCF managed + }, + annotations: { + [runv2.FUNCTION_ID_ANNOTATION]: FUNCTION_ID, // Using FUNCTION_ID_ANNOTATION as primary source for id + [runv2.FUNCTION_TARGET_ANNOTATION]: "customEntryPoint", + }, + template: { + containers: [ + { + name: runv2.DEFAULT_FUNCTION_CONTAINER_NAME, + image: IMAGE_URI, + resources: { + limits: { + cpu: "1", + memory: "256Mi", + }, + }, + }, + ], + }, + }; + + const expectedEndpoint: backend.Endpoint = { + platform: "gcfv2", + id: FUNCTION_ID, + project: PROJECT_ID, + region: LOCATION, + runtime: latest("nodejs"), + entryPoint: "customEntryPoint", + availableMemoryMb: 256, + cpu: 1, + httpsTrigger: {}, + labels: { + [runv2.RUNTIME_LABEL]: latest("nodejs"), + [runv2.CLIENT_NAME_LABEL]: "cloud-functions", + }, + environmentVariables: {}, + secretEnvironmentVariables: [], + }; + + expect(runv2.endpointFromService(service)).to.deep.equal(expectedEndpoint); + }); + + it("should derive id from FUNCTION_TARGET_ANNOTATION if 
FUNCTION_ID_ANNOTATION is missing", () => { + const service: Omit = { + ...BASE_RUN_SERVICE, + name: `projects/${PROJECT_ID}/locations/${LOCATION}/services/${SERVICE_ID}`, + labels: { + [runv2.RUNTIME_LABEL]: latest("nodejs"), + }, + annotations: { + [runv2.FUNCTION_TARGET_ANNOTATION]: FUNCTION_ID, // This will be used for id and entryPoint + }, + template: { + containers: [ + { + name: runv2.DEFAULT_FUNCTION_CONTAINER_NAME, + image: IMAGE_URI, + resources: { limits: { cpu: "1", memory: "256Mi" } }, + }, + ], + }, + }; + const result = runv2.endpointFromService(service); + expect(result.id).to.equal(FUNCTION_ID); + expect(result.entryPoint).to.equal(FUNCTION_ID); + }); + + it("should derive id from service name part if FUNCTION_ID_ANNOTATION and FUNCTION_TARGET_ANNOTATION are missing", () => { + const service: Omit = { + ...BASE_RUN_SERVICE, + name: `projects/${PROJECT_ID}/locations/${LOCATION}/services/${SERVICE_ID}`, + labels: { + [runv2.RUNTIME_LABEL]: latest("nodejs"), + }, + annotations: { + // No FUNCTION_ID_ANNOTATION or FUNCTION_TARGET_ANNOTATION + }, + template: { + containers: [ + { + name: runv2.DEFAULT_FUNCTION_CONTAINER_NAME, + image: IMAGE_URI, + resources: { limits: { cpu: "1", memory: "256Mi" } }, + }, + ], + }, + }; + const result = runv2.endpointFromService(service); + expect(result.id).to.equal(SERVICE_ID); + expect(result.entryPoint).to.equal(SERVICE_ID); + }); + + it("should copy env vars and secrets", () => { + const service: runv2.Service = JSON.parse(JSON.stringify(BASE_RUN_SERVICE)); + service.template.containers![0].env = [ + { name: "FOO", value: "bar" }, + { + name: "MY_SECRET", + valueSource: { + secretKeyRef: { + secret: `projects/${PROJECT_ID}/secrets/secret-name`, + version: "1", + }, + }, + }, + ]; + + const result = runv2.endpointFromService(service); + expect(result.environmentVariables).to.deep.equal({ FOO: "bar" }); + expect(result.secretEnvironmentVariables).to.deep.equal([ + { key: "MY_SECRET", secret: "secret-name", 
projectId: PROJECT_ID, version: "1" }, + ]); + }); + + it("should copy concurrency, min/max instances", () => { + const service: runv2.Service = JSON.parse(JSON.stringify(BASE_RUN_SERVICE)); + service.template.containerConcurrency = 10; + service.annotations![runv2.MIN_INSTANCES_ANNOTATION] = "2"; + service.annotations![runv2.MAX_INSTANCES_ANNOTATION] = "5"; + + const result = runv2.endpointFromService(service); + expect(result.concurrency).to.equal(10); + expect(result.minInstances).to.equal(2); + expect(result.maxInstances).to.equal(5); + }); + + it("should handle missing optional fields gracefully", () => { + const service: runv2.Service = { + name: `projects/${PROJECT_ID}/locations/${LOCATION}/services/${SERVICE_ID}`, + generation: 1, + template: { + containers: [ + { + name: runv2.DEFAULT_FUNCTION_CONTAINER_NAME, + image: IMAGE_URI, + resources: { limits: { memory: "128Mi", cpu: "0.5" } }, // Minimal resources + }, + ], + // No containerConcurrency, no serviceAccount + }, + // No labels, no annotations + createTime: new Date().toISOString(), + updateTime: new Date().toISOString(), + creator: "test@example.com", + lastModifier: "test@example.com", + etag: "test-etag", + }; + + const expectedEndpoint: backend.Endpoint = { + platform: "run", + id: SERVICE_ID, // Derived from service name + project: PROJECT_ID, + region: LOCATION, + runtime: latest("nodejs"), // Default runtime + entryPoint: SERVICE_ID, // No FUNCTION_TARGET_ANNOTATION + availableMemoryMb: 128, + cpu: 0.5, + httpsTrigger: {}, + labels: {}, + environmentVariables: {}, + secretEnvironmentVariables: [], + // concurrency, minInstances, maxInstances will be undefined + }; + + expect(runv2.endpointFromService(service)).to.deep.equal(expectedEndpoint); + }); + }); +}); diff --git a/src/gcp/runv2.ts b/src/gcp/runv2.ts new file mode 100644 index 00000000000..d36c9b1bf01 --- /dev/null +++ b/src/gcp/runv2.ts @@ -0,0 +1,411 @@ +import { Client } from "../apiv2"; +import { FirebaseError } from "../error"; + 
+// TODO: Consider making this use REP in the future so we can be used by more +// customers. +import { cloudbuildOrigin, runOrigin } from "../api"; +import * as proto from "./proto"; +import { assertImplements, RecursiveKeyOf } from "../metaprogramming"; +import { LongRunningOperation, pollOperation } from "../operation-poller"; +import * as backend from "../deploy/functions/backend"; +import { CODEBASE_LABEL } from "../functions/constants"; +import { EnvVar, mebibytes, PlaintextEnvVar, SecretEnvVar } from "./k8s"; +import { latest, Runtime } from "../deploy/functions/runtimes/supported"; +import { logger } from "../logger"; +import { partition } from "../functional"; + +export const API_VERSION = "v2"; + +const client = new Client({ + urlPrefix: runOrigin(), + auth: true, + apiVersion: API_VERSION, +}); + +export interface Container { + name: string; + image: string; + command?: string[]; + args?: string[]; + env?: EnvVar[]; + resources?: { + limits?: { + cpu?: string; // e.g. "1", "2", "4" + memory?: string; // e.g. "256Mi", "512Mi", "1Gi" + ["nvidia.com/gpu"]?: string; + }; + startupCpuBoost?: boolean; // If true, the container will get a CPU boost during startup. + }; + // Lots more. Most interesting is baseImageUri and maybe buildInfo. +} +export interface RevisionTemplate { + revision?: string; + labels?: Record; + annotations?: Record; + scaling?: { + // N.B. Intentionally omitting revision min/max instance; we should + // never use them. 
+ overflowScaling?: boolean; + }; + vpcAccess?: { + connector?: string; + egress?: "ALL_TRAFFIC" | "PRIVATE_RANGES_ONLY"; + networkInterfaces?: Array<{ + network?: string; + subnetwork?: string; + tags?: string[]; + }>; + }; + timeout?: proto.Duration; + serviceAccount?: string; + containers?: Container[]; + containerConcurrency?: number; +} + +export interface BuildConfig { + name: string; + sourceLocation: string; + functionTarget?: string; + enableAutomaticUpdates?: boolean; + environmentVariables?: Record; + serviceAccount?: string; +} + +// NOTE: This is a minimal copy of Cloud Run needed for our current API usage. +// Add more as needed. +// TODO: Can consider a helper where we have a second RecursiveKeyOf field for +// fields that are optional in input types but we always set them (e.g. empty record) +// in output APIs. +export interface Service { + name: string; + description?: string; + generation: number; + labels?: Record; + annotations?: Record; + tags?: Record; + createTime: string; + updateTime: string; + creator: string; + lastModifier: string; + launchStage?: string; + + // In the proto definition, but not what we use to actually track this it seems? + client?: string; + clientVersion?: string; + + etag: string; + template: RevisionTemplate; + invokerIamDisabled?: boolean; + // Is this redundant with the Build API? + buildConfig?: BuildConfig; +} + +export type ServiceOutputFields = + | "generation" + | "createTime" + | "updateTime" + | "creator" + | "lastModifier" + | "etag"; + +assertImplements>(); + +export interface StorageSource { + bucket: string; + object: string; + generation?: string; +} + +export interface BuildpacksBuild { + // Deprecated, presumably in favor of baseImage? + runtime?: string; + functionTarget?: string; + cacheImageUrl?: string; + baseImage?: string; + + // NOTE: build-time environment variables, which are not currently used. 
+ environmentVariables?: Record; + + enableAutomaticUpdates?: boolean; + projectDescriptor?: string; +} + +export interface Build { + runtime?: string; + functionTarget?: string; + storageSource: StorageSource; + imageUri: string; + buildpacksBuild: BuildpacksBuild; +} + +export interface SubmitBuildResponse { + buildOperation: string; + baseImageUri?: string; + baseImageWarning?: string; +} + +export async function submitBuild( + projectId: string, + location: string, + build: Build, +): Promise { + const res = await client.post( + `/projects/${projectId}/locations/${location}/builds`, + build, + ); + if (res.status !== 200) { + throw new FirebaseError(`Failed to submit build: ${res.status} ${res.body}`); + } + await pollOperation({ + apiOrigin: cloudbuildOrigin(), + apiVersion: "v1", + operationResourceName: res.body.buildOperation, + }); +} + +export async function updateService(service: Omit): Promise { + const fieldMask = proto.fieldMasks( + service, + /* doNotRecurseIn...*/ "labels", + "annotations", + "tags", + ); + // Always update revision name to ensure null generates a new unique revision name. 
+ fieldMask.push("template.revision"); + const res = await client.post, LongRunningOperation>( + service.name, + service, + { + queryParams: { + updateMask: fieldMask.join(","), + }, + }, + ); + const svc = await pollOperation({ + apiOrigin: runOrigin(), + apiVersion: API_VERSION, + operationResourceName: res.body.name, + }); + return svc; +} + +// TODO: Replace with real version: +function functionNameToServiceName(id: string): string { + return id.toLowerCase().replace(/_/g, "-"); +} + +/** + * The following is the YAML of a v2 function's Run service labels & annotations: + * + * labels: + * goog-drz-cloudfunctions-location: us-central1 + * goog-drz-cloudfunctions-id: ejectrequest + * firebase-functions-hash: 3653cb61dcf8e18a4a8706251b627485a5e83cd0 + * firebase-functions-codebase: js + * goog-managed-by: cloudfunctions + * goog-cloudfunctions-runtime: nodejs22 + * cloud.googleapis.com/location: us-central1 + * annotations: + * run.googleapis.com/custom-audiences: '["https://us-central1-inlined-junkdrawer.cloudfunctions.net/ejectRequest"]' + * run.googleapis.com/client-name: cli-firebase + * run.googleapis.com/build-source-location: gs://gcf-v2-sources-92611791981-us-central1/ejectRequest/function-source.zip#1749833196570851 + * run.googleapis.com/build-environment-variables: '{"GOOGLE_NODE_RUN_SCRIPTS":""}' + * run.googleapis.com/build-function-target: ejectRequest + * run.googleapis.com/build-enable-automatic-updates: 'true' + * run.googleapis.com/build-base-image: us-central1-docker.pkg.dev/serverless-runtimes/google-22-full/runtimes/nodejs22 + * run.googleapis.com/build-image-uri: us-central1-docker.pkg.dev/inlined-junkdrawer/gcf-artifacts/inlined--junkdrawer__us--central1__eject_request:version_1 + * run.googleapis.com/build-name: projects/92611791981/locations/us-central1/builds/4d41c5e1-9ab9-4889-826b-c64a0d58c99a + * serving.knative.dev/creator: service-92611791981@gcf-admin-robot.iam.gserviceaccount.com + * serving.knative.dev/lastModifier: 
 *   service-92611791981@gcf-admin-robot.iam.gserviceaccount.com
 *   run.googleapis.com/operation-id: 67a480e9-24ac-40bd-aaa1-a76e87bf3e45
 *   run.googleapis.com/ingress: all
 *   run.googleapis.com/ingress-status: all
 *   cloudfunctions.googleapis.com/function-id: ejectRequest
 *   run.googleapis.com/urls: '["https://ejectrequest-92611791981.us-central1.run.app","https://us-central1-inlined-junkdrawer.cloudfunctions.net/ejectRequest","https://ejectrequest-uvb3o4q2mq-uc.a.run.app"]'
 *
 * After ejection it is:
 * labels:
 *   goog-drz-cloudfunctions-location: us-central1
 *   goog-drz-cloudfunctions-id: ejectrequest
 *   firebase-functions-hash: 3653cb61dcf8e18a4a8706251b627485a5e83cd0
 *   firebase-functions-codebase: js
 *   goog-managed-by: ''
 *   goog-cloudfunctions-runtime: nodejs22
 *   cloud.googleapis.com/location: us-central1
 * annotations:
 *   serving.knative.dev/creator: service-92611791981@gcf-admin-robot.iam.gserviceaccount.com
 *   serving.knative.dev/lastModifier: service-92611791981@gcf-admin-robot.iam.gserviceaccount.com
 *   run.googleapis.com/custom-audiences: '["https://us-central1-inlined-junkdrawer.cloudfunctions.net/ejectRequest"]'
 *   run.googleapis.com/client-name: cli-firebase
 *   run.googleapis.com/build-source-location: gs://gcf-v2-sources-92611791981-us-central1/ejectRequest/function-source.zip#1749833196570851
 *   run.googleapis.com/build-environment-variables: '{"GOOGLE_NODE_RUN_SCRIPTS":""}'
 *   run.googleapis.com/build-function-target: ejectRequest
 *   run.googleapis.com/build-enable-automatic-updates: 'true'
 *   run.googleapis.com/build-base-image: us-central1-docker.pkg.dev/serverless-runtimes/google-22-full/runtimes/nodejs22
 *   run.googleapis.com/build-image-uri: us-central1-docker.pkg.dev/inlined-junkdrawer/gcf-artifacts/inlined--junkdrawer__us--central1__eject_request:version_1
 *   run.googleapis.com/build-name: projects/92611791981/locations/us-central1/builds/4d41c5e1-9ab9-4889-826b-c64a0d58c99a
 *
 *   cloudfunctions.googleapis.com/function-id: ejectRequest
 *   run.googleapis.com/operation-id: 8fed392e-1ded-4499-b233-ac689857be15
 *   run.googleapis.com/ingress: all
 *   run.googleapis.com/ingress-status: all
 *   run.googleapis.com/urls: '["https://ejectrequest-92611791981.us-central1.run.app","https://us-central1-inlined-junkdrawer.cloudfunctions.net/ejectRequest","https://ejectrequest-uvb3o4q2mq-uc.a.run.app"]'
 *
 * This sample was taken from an https function, but we should assume that all labels we use in GCF translate to Run
 * and preserve them to keep the Console similar for GCF 2nd gen vs Cloud Run functions when reading.
 * Notable differences from the Functions interface though is that "goog-managed-by" should be firebase-functions and
 * "run.googleapis.com/client-name" should be "cli-firebase" on eject.
 */

// NOTE: I'm seeing different values for functions that were ejected vs functions created in the Cloud Console directly with CRF.
// E.g. build-function-target may be a scalar like "ejectRequest" or a JSON object like '{"worker":"ejectRequest"}' where
// the key is the container name. Tinkering may be necessary to see if one or the other is better.

// Label that records the language runtime of the service (e.g. "nodejs22").
export const RUNTIME_LABEL = "goog-cloudfunctions-runtime";
// Label that identifies the tool managing this service ("cloudfunctions" for GCF-managed; see sample above).
export const CLIENT_NAME_LABEL = "goog-managed-by";
// Annotation mirroring the deploying client; set to "cli-firebase" on eject per the sample above.
export const CLIENT_NAME_ANNOTATION = "run.googleapis.com/client-name";
// Annotation enabling startup CPU boost for new revisions.
export const CPU_BOOST_ANNOTATION = "run.googleapis.com/startup-cpu-boost";
// Annotation recording the GCF trigger type of the service.
export const TRIGGER_TYPE_ANNOTATION = "cloudfunctions.googleapis.com/trigger-type";
export const FUNCTION_TARGET_ANNOTATION = "run.googleapis.com/build-function-target"; // e.g. '{"worker":"triggerTest"}'
export const FUNCTION_ID_ANNOTATION = "cloudfunctions.googleapis.com/function-id"; // e.g.
"triggerTest" +export const BASE_IMAGE_ANNOTATION = "run.googleapis.com/base-images"; // : '{"worker":"us-central1-docker.pkg.dev/serverless-runtimes/google-22-full/runtimes/nodejs22"}' +export const MAX_INSTANCES_ANNOTATION = "autoscaling.knative.dev/maxScale"; +export const MIN_INSTANCES_ANNOTATION = "autoscaling.knative.dev/minScale"; +export const DEFAULT_FUNCTION_CONTAINER_NAME = "worker"; +// Partial implementation. A full implementation may require more refactoring. +// E.g. server-side we need to know the actual names of the resources we're +// referencing. So maybe endpointFromSerivce should be async and fetch the +// values from the dependent services? But serviceFromEndpoint currently +// only returns the service and not the dependent resources, which we will +// need for updates. +export function endpointFromService(service: Omit): backend.Endpoint { + const [, /* projects*/ project /* locations*/, , location /* services*/, , svcId] = + service.name.split("/"); + const id = + service.annotations?.[FUNCTION_ID_ANNOTATION] || + service.annotations?.[FUNCTION_TARGET_ANNOTATION] || + svcId; + const memory = mebibytes(service.template.containers![0]!.resources!.limits!.memory!); + if (!backend.isValidMemoryOption(memory)) { + logger.debug("Converting a service to an endpoint with an invalid memory option", memory); + } + const cpu = Number(service.template.containers![0]!.resources!.limits!.cpu); + const endpoint: backend.Endpoint = { + platform: service.labels?.[CLIENT_NAME_LABEL] === "cloud-functions" ? "gcfv2" : "run", + id, + project, + labels: service.labels || {}, + region: location, + runtime: (service.labels?.[RUNTIME_LABEL] as Runtime) || latest("nodejs"), + availableMemoryMb: memory as backend.MemoryOptions, + cpu: cpu, + entryPoint: + service.annotations?.[FUNCTION_TARGET_ANNOTATION] || + service.annotations?.[FUNCTION_ID_ANNOTATION] || + id, + + // TODO: trigger types. 
+ httpsTrigger: {}, + }; + proto.renameIfPresent(endpoint, service.template, "concurrency", "containerConcurrency"); + proto.renameIfPresent(endpoint, service.labels || {}, "codebase", CODEBASE_LABEL); + if (service.annotations?.[MIN_INSTANCES_ANNOTATION]) { + endpoint.minInstances = Number(service.annotations[MIN_INSTANCES_ANNOTATION]); + } + if (service.annotations?.[MAX_INSTANCES_ANNOTATION]) { + endpoint.maxInstances = Number(service.annotations[MAX_INSTANCES_ANNOTATION]); + } + + const [env, secretEnv] = partition( + service.template.containers![0]!.env || [], + (e) => "value" in e, + ) as [PlaintextEnvVar[], SecretEnvVar[]]; + endpoint.environmentVariables = env.reduce>((acc, e) => { + acc[e.name] = e.value; + return acc; + }, {}); + endpoint.secretEnvironmentVariables = secretEnv.map((e) => { + const [, /* projects*/ projectId /* secrets*/, , secret] = + e.valueSource.secretKeyRef.secret.split("/"); + return { + key: e.name, + projectId, + secret, + version: e.valueSource.secretKeyRef.version || "latest", + }; + }); + return endpoint; +} + +export function serviceFromEndpoint( + endpoint: backend.Endpoint, + image: string, +): Omit { + const labels: Record = { + ...endpoint.labels, + [RUNTIME_LABEL]: endpoint.runtime, + [CLIENT_NAME_LABEL]: "firebase-functions", + }; + + // A bit of a hack, but other code assumes the Functions method of indicating deployment tool and + // injects this as a label. To avoid thinking that this is actually meaningful in the CRF world, + // we delete it here. 
+ delete labels["deployment-tool"]; + + // TODO: hash + if (endpoint.codebase) { + labels[CODEBASE_LABEL] = endpoint.codebase; + } + + const annotations: Record = { + [CLIENT_NAME_ANNOTATION]: "cli-firebase", + [FUNCTION_TARGET_ANNOTATION]: endpoint.id, + [FUNCTION_ID_ANNOTATION]: endpoint.id, + [CPU_BOOST_ANNOTATION]: "true", + // TODO: Add run.googleapis.com/base-images: {'worker': } for the runtime and set + // template.runtimeClassName: run.googleapis.com/linux-base-image-update + }; + if (endpoint.minInstances) { + annotations[MIN_INSTANCES_ANNOTATION] = String(endpoint.minInstances); + } + if (endpoint.maxInstances) { + annotations[MAX_INSTANCES_ANNOTATION] = String(endpoint.maxInstances); + } + const template: RevisionTemplate = { + containers: [ + { + name: "worker", + image, + env: [ + ...Object.entries(endpoint.environmentVariables || {}).map(([name, value]) => ({ + name, + value, + })), + ...(endpoint.secretEnvironmentVariables || []).map((secret) => ({ + name: secret.key, + valueSource: { + secretKeyRef: { + secret: secret.secret, + version: secret.version, + }, + }, + })), + ], + resources: { + limits: { + cpu: String((endpoint.cpu as Number) || 1), + memory: `${endpoint.availableMemoryMb || 256}Mi`, + }, + startupCpuBoost: true, + }, + }, + ], + containerConcurrency: endpoint.concurrency || backend.DEFAULT_CONCURRENCY, + }; + proto.renameIfPresent(template, endpoint, "containerConcurrency", "concurrency"); + // TODO: other trigger types, service accounts, concurrency, etc. + return { + name: `projects/${endpoint.project}/locations/${endpoint.region}/services/${functionNameToServiceName(endpoint.id)}`, + labels, + annotations, + template, + }; +}