From abf0196f07161bfeb9487b0a43e12eacd765a853 Mon Sep 17 00:00:00 2001 From: Benjamin Ingberg Date: Mon, 3 Feb 2025 22:54:20 +0100 Subject: [PATCH] Switch sharding struct from array to map Improves the ability to perform resharding by switching the sharding struct from array to map. Each map entry has a key which is used in rendezvous hashing to deterministically select which shard to use from the collection of keys. When a shard is removed it is guaranteed that only blobs which belonged to the removed shard will resolve to a new shard. In combination with ReadFallbackConfigurations this allows adding and removing shards with minimal need to rebalance the blobs between the shards. For more details and migration instructions, see: https://github.com/buildbarn/bb-adrs/blob/bf2066633e1712a3ef7c295d37cd52e65867391d/0011-rendezvous-hashing.md --- internal/mock/BUILD.bazel | 14 +- pkg/blobstore/configuration/BUILD.bazel | 1 + .../configuration/new_blob_access.go | 94 +++-- pkg/blobstore/sharding/BUILD.bazel | 8 +- .../sharding/integration/BUILD.bazel | 25 ++ .../benchmarking_integration_test.go | 289 +++++++++++++++ pkg/blobstore/sharding/legacy/BUILD.bazel | 43 +++ pkg/blobstore/sharding/legacy/README.md | 11 + .../sharding/legacy/shard_permuter.go | 18 + .../sharding/legacy/sharding_blob_access.go | 134 +++++++ .../legacy/sharding_blob_access_test.go | 194 ++++++++++ .../{ => legacy}/weighted_shard_permuter.go | 2 +- .../weighted_shard_permuter_test.go | 6 +- .../sharding/rendezvous_shard_selector.go | 155 ++++++++ .../rendezvous_shard_selector_test.go | 104 ++++++ pkg/blobstore/sharding/shard_permuter.go | 19 - pkg/blobstore/sharding/shard_selector.go | 20 ++ .../sharding/sharding_blob_access.go | 78 ++-- .../sharding/sharding_blob_access_test.go | 88 ++--- .../configuration/blobstore/blobstore.pb.go | 340 +++++++++++------- .../configuration/blobstore/blobstore.proto | 46 ++- 21 files changed, 1387 insertions(+), 302 deletions(-) create mode 100644 pkg/blobstore/sharding/integration/BUILD.bazel create mode 100644 pkg/blobstore/sharding/integration/benchmarking_integration_test.go create mode 100644 pkg/blobstore/sharding/legacy/BUILD.bazel create mode 100644 pkg/blobstore/sharding/legacy/README.md create mode 100644 pkg/blobstore/sharding/legacy/shard_permuter.go create mode 100644 pkg/blobstore/sharding/legacy/sharding_blob_access.go create mode 100644 pkg/blobstore/sharding/legacy/sharding_blob_access_test.go rename pkg/blobstore/sharding/{ => legacy}/weighted_shard_permuter.go (98%) rename pkg/blobstore/sharding/{ => legacy}/weighted_shard_permuter_test.go (83%) create mode 100644 pkg/blobstore/sharding/rendezvous_shard_selector.go create mode 100644 pkg/blobstore/sharding/rendezvous_shard_selector_test.go delete mode 100644 pkg/blobstore/sharding/shard_permuter.go create mode 100644 pkg/blobstore/sharding/shard_selector.go diff --git a/internal/mock/BUILD.bazel b/internal/mock/BUILD.bazel index 057f0c946..867976cb2 100644 --- a/internal/mock/BUILD.bazel +++ b/internal/mock/BUILD.bazel @@ -86,13 +86,23 @@ gomock( gomock( name = "blobstore_sharding", out = "blobstore_sharding.go", - interfaces = ["ShardPermuter"], + interfaces = ["ShardSelector"], library = "//pkg/blobstore/sharding", mockgen_model_library = "@org_uber_go_mock//mockgen/model", mockgen_tool = "@org_uber_go_mock//mockgen", package = "mock", ) +gomock( + name = "blobstore_legacy_sharding", + out = "blobstore_legacy_sharding.go", + interfaces = ["ShardPermuter"], + library = "//pkg/blobstore/sharding/legacy", + 
mockgen_model_library = "@org_uber_go_mock//mockgen/model", + mockgen_tool = "@org_uber_go_mock//mockgen", + package = "mock", +) + gomock( name = "blobstore_slicing", out = "blobstore_slicing.go", @@ -357,6 +367,7 @@ go_library( "aliases.go", "auth.go", "blobstore.go", + "blobstore_legacy_sharding.go", "blobstore_local.go", "blobstore_replication.go", "blobstore_sharding.go", @@ -391,6 +402,7 @@ go_library( "//pkg/blobstore/buffer", "//pkg/blobstore/local", "//pkg/blobstore/sharding", + "//pkg/blobstore/sharding/legacy", "//pkg/blobstore/slicing", "//pkg/builder", "//pkg/clock", diff --git a/pkg/blobstore/configuration/BUILD.bazel b/pkg/blobstore/configuration/BUILD.bazel index 7986570a1..a3236d0da 100644 --- a/pkg/blobstore/configuration/BUILD.bazel +++ b/pkg/blobstore/configuration/BUILD.bazel @@ -29,6 +29,7 @@ go_library( "//pkg/blobstore/readfallback", "//pkg/blobstore/replication", "//pkg/blobstore/sharding", + "//pkg/blobstore/sharding/legacy", "//pkg/blockdevice", "//pkg/capabilities", "//pkg/clock", diff --git a/pkg/blobstore/configuration/new_blob_access.go b/pkg/blobstore/configuration/new_blob_access.go index 0e1c4966e..6a966cb97 100644 --- a/pkg/blobstore/configuration/new_blob_access.go +++ b/pkg/blobstore/configuration/new_blob_access.go @@ -13,6 +13,7 @@ import ( "github.com/buildbarn/bb-storage/pkg/blobstore/readcaching" "github.com/buildbarn/bb-storage/pkg/blobstore/readfallback" "github.com/buildbarn/bb-storage/pkg/blobstore/sharding" + "github.com/buildbarn/bb-storage/pkg/blobstore/sharding/legacy" "github.com/buildbarn/bb-storage/pkg/blockdevice" "github.com/buildbarn/bb-storage/pkg/clock" "github.com/buildbarn/bb-storage/pkg/digest" @@ -85,41 +86,92 @@ func (nc *simpleNestedBlobAccessCreator) newNestedBlobAccessBare(configuration * DigestKeyFormat: slow.DigestKeyFormat, }, "read_caching", nil case *pb.BlobAccessConfiguration_Sharding: - backends := make([]blobstore.BlobAccess, 0, len(backend.Sharding.Shards)) - weights := make([]uint32, 0, len(backend.Sharding.Shards)) - var combinedDigestKeyFormat *digest.KeyFormat - for _, shard := range backend.Sharding.Shards { - if shard.Backend == nil { - // Drained backend. - backends = append(backends, nil) - } else { - // Undrained backend. 
- backend, err := nc.NewNestedBlobAccess(shard.Backend, creator) - if err != nil { - return BlobAccessInfo{}, "", err + if backend.Sharding.Legacy != nil { + backends := make([]blobstore.BlobAccess, 0, len(backend.Sharding.Legacy.ShardOrder)) + weights := make([]uint32, 0, len(backend.Sharding.Legacy.ShardOrder)) + var combinedDigestKeyFormat *digest.KeyFormat + for _, key := range backend.Sharding.Legacy.ShardOrder { + shard, exists := backend.Sharding.Shards[key] + if !exists { + return BlobAccessInfo{}, "", status.Errorf(codes.InvalidArgument, "Legacy sharding blob access refers to non-existing key %s", key) } - backends = append(backends, backend.BlobAccess) - if combinedDigestKeyFormat == nil { - combinedDigestKeyFormat = &backend.DigestKeyFormat + if shard.Backend == nil { + // Drained backend + backends = append(backends, nil) } else { - newDigestKeyFormat := combinedDigestKeyFormat.Combine(backend.DigestKeyFormat) - combinedDigestKeyFormat = &newDigestKeyFormat + // Undrained backend + backend, err := nc.NewNestedBlobAccess(shard.Backend, creator) + if err != nil { + return BlobAccessInfo{}, "", err + } + backends = append(backends, backend.BlobAccess) + if combinedDigestKeyFormat == nil { + combinedDigestKeyFormat = &backend.DigestKeyFormat + } else { + newDigestKeyFormat := combinedDigestKeyFormat.Combine(backend.DigestKeyFormat) + combinedDigestKeyFormat = &newDigestKeyFormat + } } + + if shard.Weight == 0 { + return BlobAccessInfo{}, "", status.Errorf(codes.InvalidArgument, "Shards must have positive weights") + } + weights = append(weights, shard.Weight) + } + + if combinedDigestKeyFormat == nil { + return BlobAccessInfo{}, "", status.Errorf(codes.InvalidArgument, "Cannot create sharding blob access without any undrained backends") + } + return BlobAccessInfo{ + BlobAccess: legacy.NewShardingBlobAccess( + backends, + legacy.NewWeightedShardPermuter(weights), + backend.Sharding.Legacy.HashInitialization, + ), + DigestKeyFormat: *combinedDigestKeyFormat, + }, "sharding", nil + } + backends := make([]sharding.ShardBackend, 0, len(backend.Sharding.Shards)) + shards := make([]sharding.Shard, 0, len(backend.Sharding.Shards)) + keys := make([]string, 0, len(backend.Sharding.Shards)) + var combinedDigestKeyFormat *digest.KeyFormat + for key, shard := range backend.Sharding.Shards { + if shard.Backend == nil { + return BlobAccessInfo{}, "", status.Errorf(codes.InvalidArgument, "Shard '%s' has an undefined backend, drained backends are only allowed when running in Legacy mode", key) + } + backend, err := nc.NewNestedBlobAccess(shard.Backend, creator) + if err != nil { + return BlobAccessInfo{}, "", err + } + backends = append(backends, sharding.ShardBackend{Backend: backend.BlobAccess, Key: key}) + if combinedDigestKeyFormat == nil { + combinedDigestKeyFormat = &backend.DigestKeyFormat + } else { + newDigestKeyFormat := combinedDigestKeyFormat.Combine(backend.DigestKeyFormat) + combinedDigestKeyFormat = &newDigestKeyFormat } if shard.Weight == 0 { return BlobAccessInfo{}, "", status.Errorf(codes.InvalidArgument, "Shards must have positive weights") } - weights = append(weights, shard.Weight) + shards = append(shards, sharding.Shard{ + Key: key, + Weight: shard.Weight, + }) + keys = append(keys, key) } if combinedDigestKeyFormat == nil { - return BlobAccessInfo{}, "", status.Errorf(codes.InvalidArgument, "Cannot create sharding blob access without any undrained backends") + return BlobAccessInfo{}, "", status.Errorf(codes.InvalidArgument, "Cannot create sharding blob access without any 
backends") + } + shardSelector, err := sharding.NewRendezvousShardSelector(shards) + if err != nil { + return BlobAccessInfo{}, "", status.Errorf(codes.InvalidArgument, "Could not create rendezvous shard selector") } return BlobAccessInfo{ BlobAccess: sharding.NewShardingBlobAccess( backends, - sharding.NewWeightedShardPermuter(weights), - backend.Sharding.HashInitialization), + shardSelector, + ), DigestKeyFormat: *combinedDigestKeyFormat, }, "sharding", nil case *pb.BlobAccessConfiguration_Mirrored: diff --git a/pkg/blobstore/sharding/BUILD.bazel b/pkg/blobstore/sharding/BUILD.bazel index db8f532f7..c5794b2ad 100644 --- a/pkg/blobstore/sharding/BUILD.bazel +++ b/pkg/blobstore/sharding/BUILD.bazel @@ -3,9 +3,9 @@ load("@rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "sharding", srcs = [ - "shard_permuter.go", + "rendezvous_shard_selector.go", + "shard_selector.go", "sharding_blob_access.go", - "weighted_shard_permuter.go", ], importpath = "github.com/buildbarn/bb-storage/pkg/blobstore/sharding", visibility = ["//visibility:public"], @@ -16,7 +16,6 @@ go_library( "//pkg/digest", "//pkg/util", "@bazel_remote_apis//build/bazel/remote/execution/v2:remote_execution_go_proto", - "@com_github_lazybeaver_xorshift//:xorshift", "@org_golang_x_sync//errgroup", ], ) @@ -24,13 +23,12 @@ go_library( go_test( name = "sharding_test", srcs = [ + "rendezvous_shard_selector_test.go", "sharding_blob_access_test.go", - "weighted_shard_permuter_test.go", ], deps = [ ":sharding", "//internal/mock", - "//pkg/blobstore", "//pkg/blobstore/buffer", "//pkg/digest", "//pkg/testutil", diff --git a/pkg/blobstore/sharding/integration/BUILD.bazel b/pkg/blobstore/sharding/integration/BUILD.bazel new file mode 100644 index 000000000..0fe200e50 --- /dev/null +++ b/pkg/blobstore/sharding/integration/BUILD.bazel @@ -0,0 +1,25 @@ +load("@rules_go//go:def.bzl", "go_test") + +go_test( + name = "integration", + srcs = ["benchmarking_integration_test.go"], + data = ["//cmd/bb_storage"], + deps = [ + "@bazel_remote_apis//build/bazel/remote/execution/v2:remote_execution_go_proto", + "@com_github_stretchr_testify//require", + "@org_golang_google_grpc//:grpc", + "@org_golang_google_grpc//credentials/insecure", + "@rules_go//go/runfiles", + ], +) + +go_test( + name = "integration_test", + srcs = ["benchmarking_integration_test.go"], + deps = [ + "@bazel_remote_apis//build/bazel/remote/execution/v2:remote_execution_go_proto", + "@org_golang_google_grpc//:grpc", + "@org_golang_google_grpc//credentials/insecure", + "@rules_go//go/runfiles", + ], +) diff --git a/pkg/blobstore/sharding/integration/benchmarking_integration_test.go b/pkg/blobstore/sharding/integration/benchmarking_integration_test.go new file mode 100644 index 000000000..81d74e384 --- /dev/null +++ b/pkg/blobstore/sharding/integration/benchmarking_integration_test.go @@ -0,0 +1,289 @@ +package integration + +import ( + "context" + "crypto/md5" + "encoding/binary" + "fmt" + "net" + "os" + "os/exec" + "testing" + "time" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/bazelbuild/rules_go/go/runfiles" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" +) + +const storageConfig = ` +{ + grpcServers: [{ + listenAddresses: [':8981'], + authenticationPolicy: { allow: {} }, + }], + maximumMessageSizeBytes: 4 * 1024 * 1024, + contentAddressableStorage: { + backend: { + 'local': { + keyLocationMapInMemory: { entries: 16 }, + keyLocationMapMaximumGetAttempts: 32, + 
keyLocationMapMaximumPutAttempts: 64, + oldBlocks: 8, + currentBlocks: 24, + newBlocks: 3, + blocksInMemory: { blockSizeBytes: 32 }, + }, + }, + getAuthorizer: { allow: {} }, + putAuthorizer: { allow: {} }, + findMissingAuthorizer: { allow: {} }, + }, +} +` + +const frontendConfig = ` +local shardCount = std.parseInt(std.extVar('SHARD_COUNT')); +{ + grpcServers: [{ + listenAddresses: [':8980'], + authenticationPolicy: { allow: {} }, + }], + maximumMessageSizeBytes: 4 * 1024 * 1024, + contentAddressableStorage: { + backend: { + sharding: { + shards: { + [std.toString(i)]: { + weight: 1, + backend: { grpc: { address: 'localhost:8981' } }, + } + for i in std.range(0, shardCount - 1) + } + }, + }, + getAuthorizer: { allow: {} }, + putAuthorizer: { allow: {} }, + findMissingAuthorizer: { allow: {} }, + }, +} +` + +const legacyConfig = ` +local shardCount = std.parseInt(std.extVar('SHARD_COUNT')); +{ + grpcServers: [{ + listenAddresses: [':8980'], + authenticationPolicy: { allow: {} }, + }], + maximumMessageSizeBytes: 4 * 1024 * 1024, + contentAddressableStorage: { + backend: { + sharding: { + shards: { + [std.toString(i)]: { + weight: 1, + backend: { grpc: { address: 'localhost:8981' } }, + } + for i in std.range(0, shardCount - 1) + }, + legacy: { + shardOrder: [ + std.toString(i) + for i in std.range(0, shardCount - 1) + ] + } + }, + }, + getAuthorizer: { allow: {} }, + putAuthorizer: { allow: {} }, + findMissingAuthorizer: { allow: {} }, + }, +} +` + +// waitForTCP repeatedly tries to establish a TCP connection to addr until timeout. +func waitForTCP(addr string, timeout time.Duration) error { + deadline := time.Now().Add(timeout) + for time.Now().Before(deadline) { + conn, err := net.Dial("tcp", addr) + if err == nil { + conn.Close() + return nil + } + time.Sleep(10 * time.Millisecond) + } + return fmt.Errorf("timeout waiting for TCP %s", addr) +} + +func calcMd5(n uint32) string { + b := make([]byte, 4) + binary.BigEndian.PutUint32(b, uint32(n)) + hash := md5.Sum(b) + return fmt.Sprintf("%x", hash) +} + +func callFMB(addr string) error { + conn, err := net.Dial("tcp", addr) + if err != nil { + return err + } + defer conn.Close() + + // Send a command to server F. + _, err = conn.Write([]byte("ping")) + if err != nil { + return err + } + + // Read the response (for example purposes, we ignore its content). 
+ buf := make([]byte, 1024) + _, err = conn.Read(buf) + return err +} + +func writeConfigFile(name, content string) (file *os.File, err error) { + if file, err = os.CreateTemp("", name); err != nil { + return nil, err + } + if _, err = file.WriteString(content); err != nil { + return nil, err + } + if err = file.Close(); err != nil { + return nil, err + } + return file, nil +} + +type component struct { + name string + config string +} + +func performBenchmark(b *testing.B, components []component, shardCount int) { + const digestCount = 1000 + rf, err := runfiles.New() + if err != nil { + b.Fatalf("failed to intialize runfiles: %v", err) + } + bbStoragePath, err := rf.Rlocation("_main/cmd/bb_storage/bb_storage_/bb_storage") + if err != nil { + b.Fatalf("failed to find runfiles: %v", err) + } + for _, component := range components { + file, err := writeConfigFile(component.name, component.config) + if err != nil { + b.Fatalf("failed to write config file for %q: %v", component.name, err) + } + defer os.Remove(file.Name()) + cmd := exec.Command(bbStoragePath, file.Name()) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + cmd.Env = append(os.Environ(), fmt.Sprintf("SHARD_COUNT=%d", shardCount)) + if err := cmd.Start(); err != nil { + b.Fatalf("failed to start component %q: %v", component.name, err) + } + defer func() { + cmd.Process.Kill() + cmd.Wait() + }() + } + + if err := waitForTCP("127.0.0.1:8980", 5*time.Second); err != nil { + b.Fatalf("frontend did not start in time: %v", err) + } + + digests := make([]*remoteexecution.Digest, digestCount) + for i := 0; i < digestCount; i++ { + digests[i] = &remoteexecution.Digest{ + Hash: calcMd5(uint32(i)), + SizeBytes: 4, + } + } + req := &remoteexecution.FindMissingBlobsRequest{ + BlobDigests: digests, + } + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + ctx := context.Background() + conn, err := grpc.NewClient("127.0.0.1:8980", grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + b.Fatalf("failed to connect to frontend: %v", err) + } + defer conn.Close() + client := remoteexecution.NewContentAddressableStorageClient(conn) + for pb.Next() { + _, err = client.FindMissingBlobs(ctx, req) + if err != nil { + b.Fatalf("failed to call FindMissingBlobs: %v", err) + } + } + }) + b.StopTimer() +} + +func BenchmarkSharding10(b *testing.B) { + components := []component{ + {name: "storage", config: storageConfig}, + {name: "frontend", config: frontendConfig}, + } + performBenchmark(b, components, 10) +} + +func BenchmarkSharding100(b *testing.B) { + components := []component{ + {name: "storage", config: storageConfig}, + {name: "frontend", config: frontendConfig}, + } + performBenchmark(b, components, 100) +} + +func BenchmarkSharding1000(b *testing.B) { + components := []component{ + {name: "storage", config: storageConfig}, + {name: "frontend", config: frontendConfig}, + } + performBenchmark(b, components, 1000) +} + +func BenchmarkSharding10000(b *testing.B) { + components := []component{ + {name: "storage", config: storageConfig}, + {name: "frontend", config: frontendConfig}, + } + performBenchmark(b, components, 10000) +} + +func BenchmarkLegacy10(b *testing.B) { + components := []component{ + {name: "storage", config: storageConfig}, + {name: "legacy", config: legacyConfig}, + } + performBenchmark(b, components, 10) +} + +func BenchmarkLegacy100(b *testing.B) { + components := []component{ + {name: "storage", config: storageConfig}, + {name: "legacy", config: legacyConfig}, + } + performBenchmark(b, components, 100) 
+}
+
+func BenchmarkLegacy1000(b *testing.B) {
+	components := []component{
+		{name: "storage", config: storageConfig},
+		{name: "legacy", config: legacyConfig},
+	}
+	performBenchmark(b, components, 1000)
+}
+
+func BenchmarkLegacy10000(b *testing.B) {
+	components := []component{
+		{name: "storage", config: storageConfig},
+		{name: "legacy", config: legacyConfig},
+	}
+	performBenchmark(b, components, 10000)
+}
diff --git a/pkg/blobstore/sharding/legacy/BUILD.bazel b/pkg/blobstore/sharding/legacy/BUILD.bazel
new file mode 100644
index 000000000..48e11aac0
--- /dev/null
+++ b/pkg/blobstore/sharding/legacy/BUILD.bazel
@@ -0,0 +1,43 @@
+load("@rules_go//go:def.bzl", "go_library", "go_test")
+
+go_library(
+    name = "legacy",
+    srcs = [
+        "shard_permuter.go",
+        "sharding_blob_access.go",
+        "weighted_shard_permuter.go",
+    ],
+    importpath = "github.com/buildbarn/bb-storage/pkg/blobstore/sharding/legacy",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//pkg/blobstore",
+        "//pkg/blobstore/buffer",
+        "//pkg/blobstore/slicing",
+        "//pkg/digest",
+        "//pkg/util",
+        "@bazel_remote_apis//build/bazel/remote/execution/v2:remote_execution_go_proto",
+        "@com_github_lazybeaver_xorshift//:xorshift",
+        "@org_golang_x_sync//errgroup",
+    ],
+)
+
+go_test(
+    name = "legacy_test",
+    srcs = [
+        "sharding_blob_access_test.go",
+        "weighted_shard_permuter_test.go",
+    ],
+    deps = [
+        ":legacy",
+        "//internal/mock",
+        "//pkg/blobstore",
+        "//pkg/blobstore/buffer",
+        "//pkg/digest",
+        "//pkg/testutil",
+        "@bazel_remote_apis//build/bazel/remote/execution/v2:remote_execution_go_proto",
+        "@com_github_stretchr_testify//require",
+        "@org_golang_google_grpc//codes",
+        "@org_golang_google_grpc//status",
+        "@org_uber_go_mock//gomock",
+    ],
+)
diff --git a/pkg/blobstore/sharding/legacy/README.md b/pkg/blobstore/sharding/legacy/README.md
new file mode 100644
index 000000000..58a185794
--- /dev/null
+++ b/pkg/blobstore/sharding/legacy/README.md
@@ -0,0 +1,11 @@
+This folder contains the legacy implementation of the sharding blob access
+configuration. Care has been taken to keep this code contained and reasonably
+unmodified.
+
+The purpose of keeping the legacy sharding implementation around is to simplify
+switching from the old sharding implementation to the new one: components can
+be switched to the new sharding implementation with a fallback to the old
+implementation.
+
+Consumers are expected to switch in a timely manner; at some point this entire
+folder will be deleted.
\ No newline at end of file
diff --git a/pkg/blobstore/sharding/legacy/shard_permuter.go b/pkg/blobstore/sharding/legacy/shard_permuter.go
new file mode 100644
index 000000000..8ce45f63c
--- /dev/null
+++ b/pkg/blobstore/sharding/legacy/shard_permuter.go
@@ -0,0 +1,18 @@
+package legacy
+
+// ShardSelector is the callback type called by ShardPermuter.GetShard. It is
+// invoked until false is returned, providing a backend index number for every
+// call.
+type ShardSelector func(int) bool
+
+// ShardPermuter is an algorithm for turning a hash into a series of indices
+// corresponding to backends capable of serving blobs corresponding with that
+// hash.
+//
+// As backends may be unavailable (e.g., drained) or replication strategies may
+// be applied to duplicate blobs, it is important that an actual permutation is
+// returned to ensure every backend is given a chance. It is permitted to
+// spuriously generate the same index multiple times.
+type ShardPermuter interface { + GetShard(hash uint64, selector ShardSelector) +} diff --git a/pkg/blobstore/sharding/legacy/sharding_blob_access.go b/pkg/blobstore/sharding/legacy/sharding_blob_access.go new file mode 100644 index 000000000..6ad85ddbc --- /dev/null +++ b/pkg/blobstore/sharding/legacy/sharding_blob_access.go @@ -0,0 +1,134 @@ +package legacy + +import ( + "context" + "sync/atomic" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-storage/pkg/blobstore" + "github.com/buildbarn/bb-storage/pkg/blobstore/buffer" + "github.com/buildbarn/bb-storage/pkg/blobstore/slicing" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/util" + + "golang.org/x/sync/errgroup" +) + +type shardingBlobAccess struct { + backends []blobstore.BlobAccess + shardPermuter ShardPermuter + hashInitialization uint64 + getCapabilitiesRound atomic.Uint64 +} + +// NewShardingBlobAccess is an adapter for BlobAccess that partitions requests +// across backends by hashing the digest. A ShardPermuter is used to map hashes +// to backends. +func NewShardingBlobAccess(backends []blobstore.BlobAccess, shardPermuter ShardPermuter, hashInitialization uint64) blobstore.BlobAccess { + return &shardingBlobAccess{ + backends: backends, + shardPermuter: shardPermuter, + hashInitialization: hashInitialization, + } +} + +func (ba *shardingBlobAccess) getBackendIndexByDigest(blobDigest digest.Digest) int { + // Hash the key using FNV-1a. + h := ba.hashInitialization + for _, c := range blobDigest.GetKey(digest.KeyWithoutInstance) { + h ^= uint64(c) + h *= 1099511628211 + } + return ba.getBackendIndexByHash(h) +} + +func (ba *shardingBlobAccess) getBackendIndexByHash(h uint64) int { + // Keep requesting shards until matching one that is undrained. + var selectedIndex int + ba.shardPermuter.GetShard(h, func(index int) bool { + if ba.backends[index] == nil { + return true + } + selectedIndex = index + return false + }) + return selectedIndex +} + +func (ba *shardingBlobAccess) Get(ctx context.Context, digest digest.Digest) buffer.Buffer { + index := ba.getBackendIndexByDigest(digest) + return buffer.WithErrorHandler( + ba.backends[index].Get(ctx, digest), + shardIndexAddingErrorHandler{index: index}) +} + +func (ba *shardingBlobAccess) GetFromComposite(ctx context.Context, parentDigest, childDigest digest.Digest, slicer slicing.BlobSlicer) buffer.Buffer { + index := ba.getBackendIndexByDigest(parentDigest) + return buffer.WithErrorHandler( + ba.backends[index].GetFromComposite(ctx, parentDigest, childDigest, slicer), + shardIndexAddingErrorHandler{index: index}) +} + +func (ba *shardingBlobAccess) Put(ctx context.Context, digest digest.Digest, b buffer.Buffer) error { + index := ba.getBackendIndexByDigest(digest) + if err := ba.backends[index].Put(ctx, digest, b); err != nil { + return util.StatusWrapf(err, "Shard %d", index) + } + return nil +} + +func (ba *shardingBlobAccess) FindMissing(ctx context.Context, digests digest.Set) (digest.Set, error) { + // Partition all digests by shard. + digestsPerBackend := make([]digest.SetBuilder, 0, len(ba.backends)) + for range ba.backends { + digestsPerBackend = append(digestsPerBackend, digest.NewSetBuilder()) + } + for _, blobDigest := range digests.Items() { + digestsPerBackend[ba.getBackendIndexByDigest(blobDigest)].Add(blobDigest) + } + + // Asynchronously call FindMissing() on backends. 
+ missingPerBackend := make([]digest.Set, 0, len(ba.backends)) + group, ctxWithCancel := errgroup.WithContext(ctx) + for indexIter, digestsIter := range digestsPerBackend { + index, digests := indexIter, digestsIter + if digests.Length() > 0 { + missingPerBackend = append(missingPerBackend, digest.EmptySet) + missingOut := &missingPerBackend[len(missingPerBackend)-1] + group.Go(func() error { + missing, err := ba.backends[index].FindMissing(ctxWithCancel, digests.Build()) + if err != nil { + return util.StatusWrapf(err, "Shard %d", index) + } + *missingOut = missing + return nil + }) + } + } + + // Recombine results. + if err := group.Wait(); err != nil { + return digest.EmptySet, err + } + return digest.GetUnion(missingPerBackend), nil +} + +func (ba *shardingBlobAccess) GetCapabilities(ctx context.Context, instanceName digest.InstanceName) (*remoteexecution.ServerCapabilities, error) { + // Spread requests across shards. + index := ba.getBackendIndexByHash(ba.getCapabilitiesRound.Add(1)) + capabilities, err := ba.backends[index].GetCapabilities(ctx, instanceName) + if err != nil { + return nil, util.StatusWrapf(err, "Shard %d", index) + } + return capabilities, nil +} + +type shardIndexAddingErrorHandler struct { + index int +} + +func (eh shardIndexAddingErrorHandler) OnError(err error) (buffer.Buffer, error) { + return nil, util.StatusWrapf(err, "Shard %d", eh.index) +} + +func (eh shardIndexAddingErrorHandler) Done() {} diff --git a/pkg/blobstore/sharding/legacy/sharding_blob_access_test.go b/pkg/blobstore/sharding/legacy/sharding_blob_access_test.go new file mode 100644 index 000000000..631a3287b --- /dev/null +++ b/pkg/blobstore/sharding/legacy/sharding_blob_access_test.go @@ -0,0 +1,194 @@ +package legacy_test + +import ( + "context" + "testing" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-storage/internal/mock" + "github.com/buildbarn/bb-storage/pkg/blobstore" + "github.com/buildbarn/bb-storage/pkg/blobstore/buffer" + "github.com/buildbarn/bb-storage/pkg/blobstore/sharding/legacy" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/testutil" + "github.com/stretchr/testify/require" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "go.uber.org/mock/gomock" +) + +func TestShardingBlobAccess(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + shard0 := mock.NewMockBlobAccess(ctrl) + shard1 := mock.NewMockBlobAccess(ctrl) + shardPermuter := mock.NewMockShardPermuter(ctrl) + blobAccess := legacy.NewShardingBlobAccess( + []blobstore.BlobAccess{ + shard0, + shard1, + nil, // Shard that is explicitly drained. + }, + shardPermuter, + /* hashInitialization = */ 0x62994904405896a1) + + helloDigest := digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "8b1a9953c4611296a827abf8c47804d7", 5) + llDigest := digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "5b54c0a045f179bcbbbc9abcb8b5cd4c", 2) + + t.Run("GetFailure", func(t *testing.T) { + // Errors should be prefixed with a shard number. + shardPermuter.EXPECT().GetShard(uint64(0x7118d6877ee9ee3d), gomock.Any()).Do( + func(hash uint64, selector legacy.ShardSelector) { + require.True(t, selector(2)) + require.False(t, selector(1)) + }) + shard1.EXPECT().Get(ctx, helloDigest). 
+ Return(buffer.NewBufferFromError(status.Error(codes.Unavailable, "Server offline"))) + + _, err := blobAccess.Get(ctx, helloDigest).ToByteSlice(1000) + testutil.RequireEqualStatus(t, status.Error(codes.Unavailable, "Shard 1: Server offline"), err) + }) + + t.Run("GetSuccess", func(t *testing.T) { + shardPermuter.EXPECT().GetShard(uint64(0x7118d6877ee9ee3d), gomock.Any()).Do( + func(hash uint64, selector legacy.ShardSelector) { + require.False(t, selector(0)) + }) + shard0.EXPECT().Get(ctx, helloDigest). + Return(buffer.NewValidatedBufferFromByteSlice([]byte("Hello"))) + + data, err := blobAccess.Get(ctx, helloDigest).ToByteSlice(1000) + require.NoError(t, err) + require.Equal(t, []byte("Hello"), data) + }) + + t.Run("GetFromCompositeSuccess", func(t *testing.T) { + // For reads from composite objects, the sharding needs + // to be based on the parent digest. That digest was + // used to upload the object to storage. + shardPermuter.EXPECT().GetShard(uint64(0x7118d6877ee9ee3d), gomock.Any()).Do( + func(hash uint64, selector legacy.ShardSelector) { + require.False(t, selector(0)) + }) + slicer := mock.NewMockBlobSlicer(ctrl) + shard0.EXPECT().GetFromComposite(ctx, helloDigest, llDigest, slicer). + Return(buffer.NewValidatedBufferFromByteSlice([]byte("ll"))) + + data, err := blobAccess.GetFromComposite(ctx, helloDigest, llDigest, slicer).ToByteSlice(1000) + require.NoError(t, err) + require.Equal(t, []byte("ll"), data) + }) + + t.Run("PutFailure", func(t *testing.T) { + // Errors should be prefixed with a shard number. + shardPermuter.EXPECT().GetShard(uint64(0x7118d6877ee9ee3d), gomock.Any()).Do( + func(hash uint64, selector legacy.ShardSelector) { + require.True(t, selector(2)) + require.False(t, selector(1)) + }) + shard1.EXPECT().Put(ctx, helloDigest, gomock.Any()).DoAndReturn( + func(ctx context.Context, digest digest.Digest, b buffer.Buffer) error { + b.Discard() + return status.Error(codes.Unavailable, "Server offline") + }) + + testutil.RequireEqualStatus( + t, + status.Error(codes.Unavailable, "Shard 1: Server offline"), + blobAccess.Put(ctx, helloDigest, buffer.NewValidatedBufferFromByteSlice([]byte("Hello")))) + }) + + t.Run("PutSuccess", func(t *testing.T) { + shardPermuter.EXPECT().GetShard(uint64(0x7118d6877ee9ee3d), gomock.Any()).Do( + func(hash uint64, selector legacy.ShardSelector) { + require.False(t, selector(0)) + }) + shard0.EXPECT().Put(ctx, helloDigest, gomock.Any()).DoAndReturn( + func(ctx context.Context, digest digest.Digest, b buffer.Buffer) error { + data, err := b.ToByteSlice(1000) + require.NoError(t, err) + require.Equal(t, []byte("Hello"), data) + return nil + }) + + require.NoError(t, blobAccess.Put(ctx, helloDigest, buffer.NewValidatedBufferFromByteSlice([]byte("Hello")))) + }) + + digest1 := digest.MustNewDigest("", remoteexecution.DigestFunction_MD5, "21f843aefbfb88627ec2cad9e8f1f49a", 1) + digest2 := digest.MustNewDigest("", remoteexecution.DigestFunction_MD5, "48f2503cf369373b0631da97fb9de1c1", 2) + digest3 := digest.MustNewDigest("", remoteexecution.DigestFunction_MD5, "942a5b4164c26ae5d57a4f9526dcfca4", 3) + digest4 := digest.MustNewDigest("", remoteexecution.DigestFunction_MD5, "f8f3da00ff2862082bddbb15300343f6", 4) + + t.Run("FindMissingFailure", func(t *testing.T) { + // Digests provided to FindMissing() are partitioned, + // causing up to one call per backend. If one of the + // backends reports failure, we immediately cancel the + // context for remaining requests, and return the first + // error that occurred. 
+ shardPermuter.EXPECT().GetShard(uint64(0xe4780eee2c3e5c4d), gomock.Any()).Do( + func(hash uint64, selector legacy.ShardSelector) { + require.False(t, selector(0)) + }) + shardPermuter.EXPECT().GetShard(uint64(0xb1e63d21c14e3f12), gomock.Any()).Do( + func(hash uint64, selector legacy.ShardSelector) { + require.False(t, selector(1)) + }) + shard0.EXPECT().FindMissing( + gomock.Any(), + digest1.ToSingletonSet(), + ).Return(digest.EmptySet, status.Error(codes.Unavailable, "Server offline")) + shard1.EXPECT().FindMissing( + gomock.Any(), + digest2.ToSingletonSet(), + ).DoAndReturn(func(ctx context.Context, digests digest.Set) (digest.Set, error) { + <-ctx.Done() + require.Equal(t, context.Canceled, ctx.Err()) + return digest.EmptySet, status.Error(codes.Canceled, "Context canceled") + }) + + _, err := blobAccess.FindMissing( + ctx, + digest.NewSetBuilder().Add(digest1).Add(digest2).Build(), + ) + testutil.RequireEqualStatus(t, status.Error(codes.Unavailable, "Shard 0: Server offline"), err) + }) + + t.Run("FindMissingSuccess", func(t *testing.T) { + shardPermuter.EXPECT().GetShard(uint64(0xe4780eee2c3e5c4d), gomock.Any()).Do( + func(hash uint64, selector legacy.ShardSelector) { + require.False(t, selector(0)) + }) + shardPermuter.EXPECT().GetShard(uint64(0xb1e63d21c14e3f12), gomock.Any()).Do( + func(hash uint64, selector legacy.ShardSelector) { + require.False(t, selector(0)) + }) + shardPermuter.EXPECT().GetShard(uint64(0x71fb8268edc4f6e9), gomock.Any()).Do( + func(hash uint64, selector legacy.ShardSelector) { + require.False(t, selector(1)) + }) + shardPermuter.EXPECT().GetShard(uint64(0xc7a206e6fcdfda55), gomock.Any()).Do( + func(hash uint64, selector legacy.ShardSelector) { + require.False(t, selector(1)) + }) + shard0.EXPECT().FindMissing( + gomock.Any(), + digest.NewSetBuilder().Add(digest1).Add(digest2).Build(), + ).Return(digest1.ToSingletonSet(), nil) + shard1.EXPECT().FindMissing( + gomock.Any(), + digest.NewSetBuilder().Add(digest3).Add(digest4).Build(), + ).Return(digest3.ToSingletonSet(), nil) + + missing, err := blobAccess.FindMissing( + ctx, + digest.NewSetBuilder(). + Add(digest1).Add(digest2). + Add(digest3).Add(digest4). 
+				Build(),
+		)
+		require.NoError(t, err)
+		require.Equal(t, digest.NewSetBuilder().Add(digest1).Add(digest3).Build(), missing)
+	})
+}
diff --git a/pkg/blobstore/sharding/weighted_shard_permuter.go b/pkg/blobstore/sharding/legacy/weighted_shard_permuter.go
similarity index 98%
rename from pkg/blobstore/sharding/weighted_shard_permuter.go
rename to pkg/blobstore/sharding/legacy/weighted_shard_permuter.go
index c2dafe2d1..16da45b88 100644
--- a/pkg/blobstore/sharding/weighted_shard_permuter.go
+++ b/pkg/blobstore/sharding/legacy/weighted_shard_permuter.go
@@ -1,4 +1,4 @@
-package sharding
+package legacy
 
 import (
 	"sort"
diff --git a/pkg/blobstore/sharding/weighted_shard_permuter_test.go b/pkg/blobstore/sharding/legacy/weighted_shard_permuter_test.go
similarity index 83%
rename from pkg/blobstore/sharding/weighted_shard_permuter_test.go
rename to pkg/blobstore/sharding/legacy/weighted_shard_permuter_test.go
index c055cc5e0..29e1be66c 100644
--- a/pkg/blobstore/sharding/weighted_shard_permuter_test.go
+++ b/pkg/blobstore/sharding/legacy/weighted_shard_permuter_test.go
@@ -1,16 +1,16 @@
-package sharding_test
+package legacy_test
 
 import (
 	"testing"
 
-	"github.com/buildbarn/bb-storage/pkg/blobstore/sharding"
+	"github.com/buildbarn/bb-storage/pkg/blobstore/sharding/legacy"
 	"github.com/stretchr/testify/require"
 )
 
 func TestWeightedShardPermuterDistribution(t *testing.T) {
 	// Distribution across five backends with a total weight of 15.
 	weights := []uint32{1, 4, 2, 5, 3}
-	s := sharding.NewWeightedShardPermuter(weights)
+	s := legacy.NewWeightedShardPermuter(weights)
 
 	// Request a very long series of backends where a digest may be placed.
 	occurrences := map[int]uint32{}
diff --git a/pkg/blobstore/sharding/rendezvous_shard_selector.go b/pkg/blobstore/sharding/rendezvous_shard_selector.go
new file mode 100644
index 000000000..0ac05237c
--- /dev/null
+++ b/pkg/blobstore/sharding/rendezvous_shard_selector.go
@@ -0,0 +1,155 @@
+package sharding
+
+import (
+	"crypto/sha256"
+	"encoding/binary"
+	"fmt"
+	"math/bits"
+	"sort"
+)
+
+type rendezvousShard struct {
+	weight uint32
+	index  int
+	hash   uint64
+}
+
+type rendezvousShardSelector struct {
+	shards []rendezvousShard
+}
+
+func hashServer(key string) uint64 {
+	h := sha256.Sum256([]byte(key))
+	return binary.BigEndian.Uint64(h[:8])
+}
+
+// NewRendezvousShardSelector performs shard selection using the Rendezvous
+// Hashing algorithm. The algorithm distributes blobs across the shards in
+// proportion to each shard's weight and fulfills all required properties of
+// the ShardSelector interface:
+// - Reordering the shards will not affect the chosen shard.
+// - Removing a shard is guaranteed to only affect blobs that would have
+//   resolved to the removed shard.
+// - Adding a shard will only affect blobs that resolve to the new shard.
+func NewRendezvousShardSelector(shards []Shard) (ShardSelector, error) {
+	if len(shards) == 0 {
+		return nil, fmt.Errorf("RendezvousShardSelector requires at least one shard to be defined")
+	}
+	internalShards := make([]rendezvousShard, 0, len(shards))
+	keyMap := make(map[uint64]string, len(shards))
+	for index, shard := range shards {
+		hash := hashServer(shard.Key)
+		if collision, exists := keyMap[hash]; exists {
+			return nil, fmt.Errorf("hash collision between shards: %s and %s", shard.Key, collision)
+		}
+		keyMap[hash] = shard.Key
+		internalShards = append(internalShards, rendezvousShard{
+			index:  index,
+			weight: shard.Weight,
+			hash:   hash,
+		})
+	}
+	sort.Slice(internalShards, func(i, j int) bool {
+		return internalShards[i].hash < internalShards[j].hash
+	})
+	return &rendezvousShardSelector{shards: internalShards}, nil
+}
+
+func score(x uint64, weight uint32) uint64 {
+	// The mathematical formula we are approximating is -weight/log(X), where X
+	// is a uniform random number between ]0,1[. For stability and performance
+	// reasons we are forgoing any floating point operations and approximating
+	// the logarithm.
+	//
+	// Since we are interested in the relative ordering rather than the absolute
+	// value of the score, we can pick log2 as our desired implementation. Log2
+	// is simple to approximate numerically.
+	//
+	// x is already random and uniform, so we can turn it into a number between 0
+	// (inclusive) and 1 (exclusive) by simply dividing by MaxUint64+1. By the
+	// properties of the logarithm we can simplify -log2(x/(MaxUint64+1)) to
+	// log2(MaxUint64+1)-log2(x), which will be 64-log2(x).
+	logFixed := uint64(64)<<16 - Log2Fixed(x)
+	// Replace weight with a fixed point representation of weight. We're not
+	// using floating point math, so we want the relative size of the weight to
+	// be as big as possible compared to the log. Since weight is 32 bits it is
+	// safe to shift it by an additional 32 bits.
+	weightFixed := uint64(weight) << 32
+	return weightFixed / logFixed
+}
+
+const (
+	lutEntryBits = 6
+)
+
+// Lookup table used for the log2 fraction; it is a fixed point representation
+// of log2(x) for x between [1,2], which is a value between 0 and 1.
It uses 16 +// bits of precision containing 1<> 1) + bitfield := x << (64 - msb) + index := bitfield >> (64 - lutEntryBits) + interp := bitfield << lutEntryBits >> 16 + base := lut[index] + next := lut[index+1] + delta := uint64(next - base) + frac := uint64(base)<<48 + (delta * interp) + return (uint64(msb) << 16) | uint64(frac)>>48 +} + +// A very fast PRNG with strong mixing properties +func splitmix64(x uint64) uint64 { + x ^= x >> 30 + x *= 0xbf58476d1ce4e5b9 + x ^= x >> 27 + x *= 0x94d049bb133111eb + x ^= x >> 31 + return x +} + +func (s *rendezvousShardSelector) GetShard(hash uint64) int { + var best uint64 + var bestIndex int + for _, shard := range s.shards { + mixed := splitmix64(shard.hash ^ hash) + current := score(mixed, shard.weight) + if current > best { + best = current + bestIndex = shard.index + } + } + return bestIndex +} diff --git a/pkg/blobstore/sharding/rendezvous_shard_selector_test.go b/pkg/blobstore/sharding/rendezvous_shard_selector_test.go new file mode 100644 index 000000000..f4c1b61e7 --- /dev/null +++ b/pkg/blobstore/sharding/rendezvous_shard_selector_test.go @@ -0,0 +1,104 @@ +package sharding_test + +import ( + "fmt" + "math" + "strconv" + "testing" + + "github.com/buildbarn/bb-storage/pkg/blobstore/sharding" + "github.com/stretchr/testify/require" +) + +func TestLog2Fixed(t *testing.T) { + bits := 16 + // test all powers of 2 (answer should be exact) + for i := 0; i < 64; i++ { + expected := uint64(i) << bits + actual := sharding.Log2Fixed(uint64(1) << i) + require.Equal(t, expected, actual, "Power of two should give exact result") + } + // test numbers < 100_000, expect less than 0.01% relative error from true result + for i := 2; i < 100_000; i++ { + expected := math.Log2(float64(i)) + actual := float64(sharding.Log2Fixed(uint64(i))) / math.Pow(2, float64(bits)) + require.InEpsilon(t, expected, actual, 1e-5, fmt.Sprintf("Error is too high for %d", i)) + } +} + +func TestRendezvousShardSelectorDistribution(t *testing.T) { + const COUNT = 10_000_000 + precomputedResults := [20]int{3, 2, 0, 3, 3, 3, 0, 0, 1, 3, 0, 3, 1, 2, 2, 2, 3, 3, 1, 3} + precomputedOccurrences := [5]int{668687, 1332248, 2666353, 4666342, 666370} + // Distribution across multiple backends + weights := []sharding.Shard{ + {Key: "a", Weight: 1}, + {Key: "b", Weight: 2}, + {Key: "c", Weight: 4}, + {Key: "d", Weight: 7}, + {Key: "e", Weight: 1}, + } + s, err := sharding.NewRendezvousShardSelector(weights) + require.NoError(t, err, "Selector construction should succeed") + results := make([]int, len(precomputedResults)) + occurrences := make([]int, len(weights)) + + // Request the shard for a very large amount of blobs + for i := 0; i < COUNT; i++ { + result := s.GetShard(uint64(i)) + if i < len(results) { + results[i] = result + } + occurrences[result] += 1 + } + + t.Run("Distribution Error", func(t *testing.T) { + // Requests should be fanned out with a small error margin. 
+ weightSum := uint32(0) + for _, shard := range weights { + weightSum += shard.Weight + } + for index, shard := range weights { + require.InEpsilon(t, shard.Weight*COUNT/weightSum, occurrences[index], 1e-2) + } + }) + + t.Run("Distribution Shape", func(t *testing.T) { + shapeError := "The sharding algorithm has produced unexpected results, changing this distribution is a breaking change to buildbarn" + require.Equal(t, precomputedResults[:], results, shapeError) + require.Equal(t, precomputedOccurrences[:], occurrences, shapeError) + }) + + t.Run("Stability Test", func(t *testing.T) { + // Removing a shard should only affect the shard that is removed + results = make([]int, 10000) + for i := 0; i < len(results); i++ { + results[i] = s.GetShard(uint64(i)) + } + // drop the last shard in the slice + weightsSubset := weights[:len(weights)-1] + sharder, err := sharding.NewRendezvousShardSelector(weightsSubset) + require.NoError(t, err, "Selector construction should succeed") + for i := 0; i < len(results); i++ { + result := sharder.GetShard(uint64(i)) + if results[i] == len(weights)-1 { + continue + } + // result should be unchanged for all slices which did not resolve + // to the dropped one + require.Equal(t, results[i], result, "Dropping a shard should not affect other shards") + } + }) +} + +func BenchmarkRendezvousShardSelector(b *testing.B) { + SHARD_COUNT := 1000 + weights := make([]sharding.Shard, 0, SHARD_COUNT) + for i := 0; i < SHARD_COUNT; i++ { + weights = append(weights, sharding.Shard{Key: strconv.Itoa(i), Weight: uint32(i)}) + } + s, _ := sharding.NewRendezvousShardSelector(weights) + for i := 0; i < b.N; i++ { + s.GetShard(uint64(i)) + } +} diff --git a/pkg/blobstore/sharding/shard_permuter.go b/pkg/blobstore/sharding/shard_permuter.go deleted file mode 100644 index 81cc6c100..000000000 --- a/pkg/blobstore/sharding/shard_permuter.go +++ /dev/null @@ -1,19 +0,0 @@ -package sharding - -// ShardSelector is the callback type called by ShardPermuter.GetShard. -// It is invoked until false is returned, providing a backend index -// number for every call. -type ShardSelector func(int) bool - -// ShardPermuter is an algorithm for turning a hash into a series of -// indices corresponding to backends capable of serving blobs -// corresponding with that hash. -// -// As backends may be unavailable (e.g., drained) or replication -// strategies may be applied to duplicate blobs, it is important that an -// actual permutation is returned to ensure every backend is given a -// chance. It is permitted to spuriously generate the same index -// multiple times. -type ShardPermuter interface { - GetShard(hash uint64, selector ShardSelector) -} diff --git a/pkg/blobstore/sharding/shard_selector.go b/pkg/blobstore/sharding/shard_selector.go new file mode 100644 index 000000000..33d076e28 --- /dev/null +++ b/pkg/blobstore/sharding/shard_selector.go @@ -0,0 +1,20 @@ +package sharding + +// ShardSelector is an algorithm that for a hash resolves into an index which +// corresponds to the specific backend for that shard. +// +// The algorithm must be stable, the removal of an unavailable backend should +// not result in the reshuffling of any other blobs. It must also be +// numerically stable so that it produces the same result no matter the +// architecture. +type ShardSelector interface { + GetShard(hash uint64) int +} + +// Shard is a description of a shard. 
The shard selector will resolve to the +// same shard independent of the order of shards, but the returned index will +// correspond to the index sent to the ShardSelectors constructor. +type Shard struct { + Key string + Weight uint32 +} diff --git a/pkg/blobstore/sharding/sharding_blob_access.go b/pkg/blobstore/sharding/sharding_blob_access.go index c53039a8a..9301bce56 100644 --- a/pkg/blobstore/sharding/sharding_blob_access.go +++ b/pkg/blobstore/sharding/sharding_blob_access.go @@ -2,6 +2,7 @@ package sharding import ( "context" + "encoding/binary" "sync/atomic" remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" @@ -15,64 +16,53 @@ import ( ) type shardingBlobAccess struct { - backends []blobstore.BlobAccess - shardPermuter ShardPermuter - hashInitialization uint64 + backends []ShardBackend + shardSelector ShardSelector getCapabilitiesRound atomic.Uint64 } +// ShardBackend is the Backend together with its key, the key is used for error +// messages. +type ShardBackend struct { + Backend blobstore.BlobAccess + Key string +} + // NewShardingBlobAccess is an adapter for BlobAccess that partitions -// requests across backends by hashing the digest. A ShardPermuter is +// requests across backends by hashing the digest. A ShardSelector is // used to map hashes to backends. -func NewShardingBlobAccess(backends []blobstore.BlobAccess, shardPermuter ShardPermuter, hashInitialization uint64) blobstore.BlobAccess { +func NewShardingBlobAccess(backends []ShardBackend, shardSelector ShardSelector) blobstore.BlobAccess { return &shardingBlobAccess{ - backends: backends, - shardPermuter: shardPermuter, - hashInitialization: hashInitialization, + backends: backends, + shardSelector: shardSelector, } } func (ba *shardingBlobAccess) getBackendIndexByDigest(blobDigest digest.Digest) int { - // Hash the key using FNV-1a. - h := ba.hashInitialization - for _, c := range blobDigest.GetKey(digest.KeyWithoutInstance) { - h ^= uint64(c) - h *= 1099511628211 - } - return ba.getBackendIndexByHash(h) -} - -func (ba *shardingBlobAccess) getBackendIndexByHash(h uint64) int { - // Keep requesting shards until matching one that is undrained. - var selectedIndex int - ba.shardPermuter.GetShard(h, func(index int) bool { - if ba.backends[index] == nil { - return true - } - selectedIndex = index - return false - }) - return selectedIndex + // Use the first 8 bytes of the digest hash for calculating backend. 
+ hb := blobDigest.GetHashBytes() + h := binary.BigEndian.Uint64(hb[:8]) + return ba.shardSelector.GetShard(h) } func (ba *shardingBlobAccess) Get(ctx context.Context, digest digest.Digest) buffer.Buffer { index := ba.getBackendIndexByDigest(digest) return buffer.WithErrorHandler( - ba.backends[index].Get(ctx, digest), - shardIndexAddingErrorHandler{index: index}) + ba.backends[index].Backend.Get(ctx, digest), + shardKeyAddingErrorHandler{key: ba.backends[index].Key}) } func (ba *shardingBlobAccess) GetFromComposite(ctx context.Context, parentDigest, childDigest digest.Digest, slicer slicing.BlobSlicer) buffer.Buffer { index := ba.getBackendIndexByDigest(parentDigest) return buffer.WithErrorHandler( - ba.backends[index].GetFromComposite(ctx, parentDigest, childDigest, slicer), - shardIndexAddingErrorHandler{index: index}) + ba.backends[index].Backend.GetFromComposite(ctx, parentDigest, childDigest, slicer), + shardKeyAddingErrorHandler{key: ba.backends[index].Key}) } func (ba *shardingBlobAccess) Put(ctx context.Context, digest digest.Digest, b buffer.Buffer) error { index := ba.getBackendIndexByDigest(digest) - if err := ba.backends[index].Put(ctx, digest, b); err != nil { - return util.StatusWrapf(err, "Shard %d", index) + if err := ba.backends[index].Backend.Put(ctx, digest, b); err != nil { + return util.StatusWrapf(err, "Shard %s", ba.backends[index].Key) } return nil } @@ -96,9 +86,9 @@ func (ba *shardingBlobAccess) FindMissing(ctx context.Context, digests digest.Se missingPerBackend = append(missingPerBackend, digest.EmptySet) missingOut := &missingPerBackend[len(missingPerBackend)-1] group.Go(func() error { - missing, err := ba.backends[index].FindMissing(ctxWithCancel, digests.Build()) + missing, err := ba.backends[index].Backend.FindMissing(ctxWithCancel, digests.Build()) if err != nil { - return util.StatusWrapf(err, "Shard %d", index) + return util.StatusWrapf(err, "Shard %s", ba.backends[index].Key) } *missingOut = missing return nil @@ -115,20 +105,20 @@ func (ba *shardingBlobAccess) FindMissing(ctx context.Context, digests digest.Se func (ba *shardingBlobAccess) GetCapabilities(ctx context.Context, instanceName digest.InstanceName) (*remoteexecution.ServerCapabilities, error) { // Spread requests across shards. 
- index := ba.getBackendIndexByHash(ba.getCapabilitiesRound.Add(1)) - capabilities, err := ba.backends[index].GetCapabilities(ctx, instanceName) + index := ba.shardSelector.GetShard(ba.getCapabilitiesRound.Add(1)) + capabilities, err := ba.backends[index].Backend.GetCapabilities(ctx, instanceName) if err != nil { - return nil, util.StatusWrapf(err, "Shard %d", index) + return nil, util.StatusWrapf(err, "Shard %s", ba.backends[index].Key) } return capabilities, nil } -type shardIndexAddingErrorHandler struct { - index int +type shardKeyAddingErrorHandler struct { + key string } -func (eh shardIndexAddingErrorHandler) OnError(err error) (buffer.Buffer, error) { - return nil, util.StatusWrapf(err, "Shard %d", eh.index) +func (eh shardKeyAddingErrorHandler) OnError(err error) (buffer.Buffer, error) { + return nil, util.StatusWrapf(err, "Shard %s", eh.key) } -func (eh shardIndexAddingErrorHandler) Done() {} +func (eh shardKeyAddingErrorHandler) Done() {} diff --git a/pkg/blobstore/sharding/sharding_blob_access_test.go b/pkg/blobstore/sharding/sharding_blob_access_test.go index 81d9cab9b..d850d763a 100644 --- a/pkg/blobstore/sharding/sharding_blob_access_test.go +++ b/pkg/blobstore/sharding/sharding_blob_access_test.go @@ -6,7 +6,7 @@ import ( remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" "github.com/buildbarn/bb-storage/internal/mock" - "github.com/buildbarn/bb-storage/pkg/blobstore" + "github.com/buildbarn/bb-storage/pkg/blobstore/buffer" "github.com/buildbarn/bb-storage/pkg/blobstore/sharding" "github.com/buildbarn/bb-storage/pkg/digest" @@ -24,38 +24,36 @@ func TestShardingBlobAccess(t *testing.T) { shard0 := mock.NewMockBlobAccess(ctrl) shard1 := mock.NewMockBlobAccess(ctrl) - shardPermuter := mock.NewMockShardPermuter(ctrl) + shardSelector := mock.NewMockShardSelector(ctrl) blobAccess := sharding.NewShardingBlobAccess( - []blobstore.BlobAccess{ - shard0, - shard1, - nil, // Shard that is explicitly drained. + []sharding.ShardBackend{ + { + Backend: shard0, + Key: "shard0", + }, + { + Backend: shard1, + Key: "shard1", + }, }, - shardPermuter, - /* hashInitialization = */ 0x62994904405896a1) + shardSelector, + ) helloDigest := digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "8b1a9953c4611296a827abf8c47804d7", 5) llDigest := digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "5b54c0a045f179bcbbbc9abcb8b5cd4c", 2) t.Run("GetFailure", func(t *testing.T) { - // Errors should be prefixed with a shard number. - shardPermuter.EXPECT().GetShard(uint64(0x7118d6877ee9ee3d), gomock.Any()).Do( - func(hash uint64, selector sharding.ShardSelector) { - require.True(t, selector(2)) - require.False(t, selector(1)) - }) + // Errors should be prefixed with the shard key. + shardSelector.EXPECT().GetShard(uint64(0x8b1a9953c4611296)).Return(1) shard1.EXPECT().Get(ctx, helloDigest). 
Return(buffer.NewBufferFromError(status.Error(codes.Unavailable, "Server offline"))) _, err := blobAccess.Get(ctx, helloDigest).ToByteSlice(1000) - testutil.RequireEqualStatus(t, status.Error(codes.Unavailable, "Shard 1: Server offline"), err) + testutil.RequireEqualStatus(t, status.Error(codes.Unavailable, "Shard shard1: Server offline"), err) }) t.Run("GetSuccess", func(t *testing.T) { - shardPermuter.EXPECT().GetShard(uint64(0x7118d6877ee9ee3d), gomock.Any()).Do( - func(hash uint64, selector sharding.ShardSelector) { - require.False(t, selector(0)) - }) + shardSelector.EXPECT().GetShard(uint64(0x8b1a9953c4611296)).Return(0) shard0.EXPECT().Get(ctx, helloDigest). Return(buffer.NewValidatedBufferFromByteSlice([]byte("Hello"))) @@ -68,10 +66,7 @@ func TestShardingBlobAccess(t *testing.T) { // For reads from composite objects, the sharding needs // to be based on the parent digest. That digest was // used to upload the object to storage. - shardPermuter.EXPECT().GetShard(uint64(0x7118d6877ee9ee3d), gomock.Any()).Do( - func(hash uint64, selector sharding.ShardSelector) { - require.False(t, selector(0)) - }) + shardSelector.EXPECT().GetShard(uint64(0x8b1a9953c4611296)).Return(0) slicer := mock.NewMockBlobSlicer(ctrl) shard0.EXPECT().GetFromComposite(ctx, helloDigest, llDigest, slicer). Return(buffer.NewValidatedBufferFromByteSlice([]byte("ll"))) @@ -82,12 +77,8 @@ func TestShardingBlobAccess(t *testing.T) { }) t.Run("PutFailure", func(t *testing.T) { - // Errors should be prefixed with a shard number. - shardPermuter.EXPECT().GetShard(uint64(0x7118d6877ee9ee3d), gomock.Any()).Do( - func(hash uint64, selector sharding.ShardSelector) { - require.True(t, selector(2)) - require.False(t, selector(1)) - }) + // Errors should be prefixed with a shard key. + shardSelector.EXPECT().GetShard(uint64(0x8b1a9953c4611296)).Return(1) shard1.EXPECT().Put(ctx, helloDigest, gomock.Any()).DoAndReturn( func(ctx context.Context, digest digest.Digest, b buffer.Buffer) error { b.Discard() @@ -96,15 +87,12 @@ func TestShardingBlobAccess(t *testing.T) { testutil.RequireEqualStatus( t, - status.Error(codes.Unavailable, "Shard 1: Server offline"), + status.Error(codes.Unavailable, "Shard shard1: Server offline"), blobAccess.Put(ctx, helloDigest, buffer.NewValidatedBufferFromByteSlice([]byte("Hello")))) }) t.Run("PutSuccess", func(t *testing.T) { - shardPermuter.EXPECT().GetShard(uint64(0x7118d6877ee9ee3d), gomock.Any()).Do( - func(hash uint64, selector sharding.ShardSelector) { - require.False(t, selector(0)) - }) + shardSelector.EXPECT().GetShard(uint64(0x8b1a9953c4611296)).Return(0) shard0.EXPECT().Put(ctx, helloDigest, gomock.Any()).DoAndReturn( func(ctx context.Context, digest digest.Digest, b buffer.Buffer) error { data, err := b.ToByteSlice(1000) @@ -127,14 +115,8 @@ func TestShardingBlobAccess(t *testing.T) { // backends reports failure, we immediately cancel the // context for remaining requests, and return the first // error that occurred. 
- shardPermuter.EXPECT().GetShard(uint64(0xe4780eee2c3e5c4d), gomock.Any()).Do( - func(hash uint64, selector sharding.ShardSelector) { - require.False(t, selector(0)) - }) - shardPermuter.EXPECT().GetShard(uint64(0xb1e63d21c14e3f12), gomock.Any()).Do( - func(hash uint64, selector sharding.ShardSelector) { - require.False(t, selector(1)) - }) + shardSelector.EXPECT().GetShard(uint64(0x21f843aefbfb8862)).Return(0) + shardSelector.EXPECT().GetShard(uint64(0x48f2503cf369373b)).Return(1) shard0.EXPECT().FindMissing( gomock.Any(), digest1.ToSingletonSet(), @@ -152,26 +134,14 @@ func TestShardingBlobAccess(t *testing.T) { ctx, digest.NewSetBuilder().Add(digest1).Add(digest2).Build(), ) - testutil.RequireEqualStatus(t, status.Error(codes.Unavailable, "Shard 0: Server offline"), err) + testutil.RequireEqualStatus(t, status.Error(codes.Unavailable, "Shard shard0: Server offline"), err) }) t.Run("FindMissingSuccess", func(t *testing.T) { - shardPermuter.EXPECT().GetShard(uint64(0xe4780eee2c3e5c4d), gomock.Any()).Do( - func(hash uint64, selector sharding.ShardSelector) { - require.False(t, selector(0)) - }) - shardPermuter.EXPECT().GetShard(uint64(0xb1e63d21c14e3f12), gomock.Any()).Do( - func(hash uint64, selector sharding.ShardSelector) { - require.False(t, selector(0)) - }) - shardPermuter.EXPECT().GetShard(uint64(0x71fb8268edc4f6e9), gomock.Any()).Do( - func(hash uint64, selector sharding.ShardSelector) { - require.False(t, selector(1)) - }) - shardPermuter.EXPECT().GetShard(uint64(0xc7a206e6fcdfda55), gomock.Any()).Do( - func(hash uint64, selector sharding.ShardSelector) { - require.False(t, selector(1)) - }) + shardSelector.EXPECT().GetShard(uint64(0x21f843aefbfb8862)).Return(0) + shardSelector.EXPECT().GetShard(uint64(0x48f2503cf369373b)).Return(0) + shardSelector.EXPECT().GetShard(uint64(0x942a5b4164c26ae5)).Return(1) + shardSelector.EXPECT().GetShard(uint64(0xf8f3da00ff286208)).Return(1) shard0.EXPECT().FindMissing( gomock.Any(), digest.NewSetBuilder().Add(digest1).Add(digest2).Build(), diff --git a/pkg/proto/configuration/blobstore/blobstore.pb.go b/pkg/proto/configuration/blobstore/blobstore.pb.go index d1b12d3af..428501050 100644 --- a/pkg/proto/configuration/blobstore/blobstore.pb.go +++ b/pkg/proto/configuration/blobstore/blobstore.pb.go @@ -498,11 +498,11 @@ func (x *ReadCachingBlobAccessConfiguration) GetReplicator() *BlobReplicatorConf } type ShardingBlobAccessConfiguration struct { - state protoimpl.MessageState `protogen:"open.v1"` - HashInitialization uint64 `protobuf:"varint,1,opt,name=hash_initialization,json=hashInitialization,proto3" json:"hash_initialization,omitempty"` - Shards []*ShardingBlobAccessConfiguration_Shard `protobuf:"bytes,2,rep,name=shards,proto3" json:"shards,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Shards map[string]*ShardingBlobAccessConfiguration_Shard `protobuf:"bytes,2,rep,name=shards,proto3" json:"shards,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Legacy *ShardingBlobAccessConfiguration_Legacy `protobuf:"bytes,3,opt,name=legacy,proto3" json:"legacy,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ShardingBlobAccessConfiguration) Reset() { @@ -535,16 +535,16 @@ func (*ShardingBlobAccessConfiguration) Descriptor() ([]byte, []int) { return file_pkg_proto_configuration_blobstore_blobstore_proto_rawDescGZIP(), []int{3} } -func (x *ShardingBlobAccessConfiguration) 
GetHashInitialization() uint64 { +func (x *ShardingBlobAccessConfiguration) GetShards() map[string]*ShardingBlobAccessConfiguration_Shard { if x != nil { - return x.HashInitialization + return x.Shards } - return 0 + return nil } -func (x *ShardingBlobAccessConfiguration) GetShards() []*ShardingBlobAccessConfiguration_Shard { +func (x *ShardingBlobAccessConfiguration) GetLegacy() *ShardingBlobAccessConfiguration_Legacy { if x != nil { - return x.Shards + return x.Legacy } return nil } @@ -1734,6 +1734,58 @@ func (x *ShardingBlobAccessConfiguration_Shard) GetWeight() uint32 { return 0 } +type ShardingBlobAccessConfiguration_Legacy struct { + state protoimpl.MessageState `protogen:"open.v1"` + ShardOrder []string `protobuf:"bytes,1,rep,name=shard_order,json=shardOrder,proto3" json:"shard_order,omitempty"` + HashInitialization uint64 `protobuf:"varint,2,opt,name=hash_initialization,json=hashInitialization,proto3" json:"hash_initialization,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ShardingBlobAccessConfiguration_Legacy) Reset() { + *x = ShardingBlobAccessConfiguration_Legacy{} + mi := &file_pkg_proto_configuration_blobstore_blobstore_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ShardingBlobAccessConfiguration_Legacy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ShardingBlobAccessConfiguration_Legacy) ProtoMessage() {} + +func (x *ShardingBlobAccessConfiguration_Legacy) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_configuration_blobstore_blobstore_proto_msgTypes[21] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ShardingBlobAccessConfiguration_Legacy.ProtoReflect.Descriptor instead. 
+func (*ShardingBlobAccessConfiguration_Legacy) Descriptor() ([]byte, []int) { + return file_pkg_proto_configuration_blobstore_blobstore_proto_rawDescGZIP(), []int{3, 1} +} + +func (x *ShardingBlobAccessConfiguration_Legacy) GetShardOrder() []string { + if x != nil { + return x.ShardOrder + } + return nil +} + +func (x *ShardingBlobAccessConfiguration_Legacy) GetHashInitialization() uint64 { + if x != nil { + return x.HashInitialization + } + return 0 +} + type LocalBlobAccessConfiguration_KeyLocationMapInMemory struct { state protoimpl.MessageState `protogen:"open.v1"` Entries int64 `protobuf:"varint,1,opt,name=entries,proto3" json:"entries,omitempty"` @@ -1743,7 +1795,7 @@ type LocalBlobAccessConfiguration_KeyLocationMapInMemory struct { func (x *LocalBlobAccessConfiguration_KeyLocationMapInMemory) Reset() { *x = LocalBlobAccessConfiguration_KeyLocationMapInMemory{} - mi := &file_pkg_proto_configuration_blobstore_blobstore_proto_msgTypes[21] + mi := &file_pkg_proto_configuration_blobstore_blobstore_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1755,7 +1807,7 @@ func (x *LocalBlobAccessConfiguration_KeyLocationMapInMemory) String() string { func (*LocalBlobAccessConfiguration_KeyLocationMapInMemory) ProtoMessage() {} func (x *LocalBlobAccessConfiguration_KeyLocationMapInMemory) ProtoReflect() protoreflect.Message { - mi := &file_pkg_proto_configuration_blobstore_blobstore_proto_msgTypes[21] + mi := &file_pkg_proto_configuration_blobstore_blobstore_proto_msgTypes[23] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1787,7 +1839,7 @@ type LocalBlobAccessConfiguration_BlocksInMemory struct { func (x *LocalBlobAccessConfiguration_BlocksInMemory) Reset() { *x = LocalBlobAccessConfiguration_BlocksInMemory{} - mi := &file_pkg_proto_configuration_blobstore_blobstore_proto_msgTypes[22] + mi := &file_pkg_proto_configuration_blobstore_blobstore_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1799,7 +1851,7 @@ func (x *LocalBlobAccessConfiguration_BlocksInMemory) String() string { func (*LocalBlobAccessConfiguration_BlocksInMemory) ProtoMessage() {} func (x *LocalBlobAccessConfiguration_BlocksInMemory) ProtoReflect() protoreflect.Message { - mi := &file_pkg_proto_configuration_blobstore_blobstore_proto_msgTypes[22] + mi := &file_pkg_proto_configuration_blobstore_blobstore_proto_msgTypes[24] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1833,7 +1885,7 @@ type LocalBlobAccessConfiguration_BlocksOnBlockDevice struct { func (x *LocalBlobAccessConfiguration_BlocksOnBlockDevice) Reset() { *x = LocalBlobAccessConfiguration_BlocksOnBlockDevice{} - mi := &file_pkg_proto_configuration_blobstore_blobstore_proto_msgTypes[23] + mi := &file_pkg_proto_configuration_blobstore_blobstore_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1845,7 +1897,7 @@ func (x *LocalBlobAccessConfiguration_BlocksOnBlockDevice) String() string { func (*LocalBlobAccessConfiguration_BlocksOnBlockDevice) ProtoMessage() {} func (x *LocalBlobAccessConfiguration_BlocksOnBlockDevice) ProtoReflect() protoreflect.Message { - mi := &file_pkg_proto_configuration_blobstore_blobstore_proto_msgTypes[23] + mi := &file_pkg_proto_configuration_blobstore_blobstore_proto_msgTypes[25] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if 
ms.LoadMessageInfo() == nil { @@ -1892,7 +1944,7 @@ type LocalBlobAccessConfiguration_Persistent struct { func (x *LocalBlobAccessConfiguration_Persistent) Reset() { *x = LocalBlobAccessConfiguration_Persistent{} - mi := &file_pkg_proto_configuration_blobstore_blobstore_proto_msgTypes[24] + mi := &file_pkg_proto_configuration_blobstore_blobstore_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1904,7 +1956,7 @@ func (x *LocalBlobAccessConfiguration_Persistent) String() string { func (*LocalBlobAccessConfiguration_Persistent) ProtoMessage() {} func (x *LocalBlobAccessConfiguration_Persistent) ProtoReflect() protoreflect.Message { - mi := &file_pkg_proto_configuration_blobstore_blobstore_proto_msgTypes[24] + mi := &file_pkg_proto_configuration_blobstore_blobstore_proto_msgTypes[26] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2126,26 +2178,44 @@ var file_pkg_proto_configuration_blobstore_blobstore_proto_rawDesc = string([]by 0x6e, 0x2e, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x6f, 0x72, 0x22, 0xab, 0x02, 0x0a, 0x1f, 0x53, 0x68, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, + 0x74, 0x6f, 0x72, 0x22, 0xcb, 0x04, 0x0a, 0x1f, 0x53, 0x68, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x6c, 0x6f, 0x62, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x0a, 0x13, 0x68, 0x61, 0x73, 0x68, 0x5f, - 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x04, 0x52, 0x12, 0x68, 0x61, 0x73, 0x68, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, - 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x60, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x72, - 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, - 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x2e, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x6c, 0x6f, 0x62, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x52, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x1a, 0x75, 0x0a, 0x05, 0x53, 0x68, - 0x61, 0x72, 0x64, 0x12, 0x54, 0x0a, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x62, - 0x6c, 0x6f, 0x62, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x42, 0x6c, 0x6f, 0x62, 0x41, 0x63, 0x63, - 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x77, 0x65, 0x69, - 0x67, 0x68, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x77, 0x65, 0x69, 0x67, 0x68, - 0x74, 0x22, 0xa9, 0x03, 0x0a, 0x1f, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x65, 0x64, 0x42, 0x6c, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x66, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4e, 
0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, + 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x69, 0x6e, 0x67, 0x42, 0x6c, 0x6f, 0x62, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, + 0x61, 0x0a, 0x06, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x49, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x74, + 0x6f, 0x72, 0x65, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x6c, 0x6f, 0x62, + 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x52, 0x06, 0x6c, 0x65, 0x67, 0x61, + 0x63, 0x79, 0x1a, 0x75, 0x0a, 0x05, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x54, 0x0a, 0x07, 0x62, + 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x62, + 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x2e, 0x42, 0x6c, 0x6f, 0x62, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, + 0x64, 0x12, 0x16, 0x0a, 0x06, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x06, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x1a, 0x5a, 0x0a, 0x06, 0x4c, 0x65, 0x67, + 0x61, 0x63, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x6f, 0x72, 0x64, + 0x65, 0x72, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x68, 0x61, 0x72, 0x64, 0x4f, + 0x72, 0x64, 0x65, 0x72, 0x12, 0x2f, 0x0a, 0x13, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x69, 0x6e, 0x69, + 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x12, 0x68, 0x61, 0x73, 0x68, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x83, 0x01, 0x0a, 0x0b, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x5e, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, + 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x69, 0x6e, 0x67, 0x42, 0x6c, 0x6f, 0x62, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x4a, 0x04, 0x08, 0x01, 0x10, + 0x02, 0x22, 0xa9, 0x03, 0x0a, 0x1f, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x62, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x57, 0x0a, 0x09, 0x62, 0x61, 0x63, 0x6b, 0x65, 
0x6e, 0x64, 0x5f, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, @@ -2555,52 +2625,54 @@ func file_pkg_proto_configuration_blobstore_blobstore_proto_rawDescGZIP() []byte return file_pkg_proto_configuration_blobstore_blobstore_proto_rawDescData } -var file_pkg_proto_configuration_blobstore_blobstore_proto_msgTypes = make([]protoimpl.MessageInfo, 27) +var file_pkg_proto_configuration_blobstore_blobstore_proto_msgTypes = make([]protoimpl.MessageInfo, 29) var file_pkg_proto_configuration_blobstore_blobstore_proto_goTypes = []any{ - (*BlobstoreConfiguration)(nil), // 0: buildbarn.configuration.blobstore.BlobstoreConfiguration - (*BlobAccessConfiguration)(nil), // 1: buildbarn.configuration.blobstore.BlobAccessConfiguration - (*ReadCachingBlobAccessConfiguration)(nil), // 2: buildbarn.configuration.blobstore.ReadCachingBlobAccessConfiguration - (*ShardingBlobAccessConfiguration)(nil), // 3: buildbarn.configuration.blobstore.ShardingBlobAccessConfiguration - (*MirroredBlobAccessConfiguration)(nil), // 4: buildbarn.configuration.blobstore.MirroredBlobAccessConfiguration - (*LocalBlobAccessConfiguration)(nil), // 5: buildbarn.configuration.blobstore.LocalBlobAccessConfiguration - (*ExistenceCachingBlobAccessConfiguration)(nil), // 6: buildbarn.configuration.blobstore.ExistenceCachingBlobAccessConfiguration - (*CompletenessCheckingBlobAccessConfiguration)(nil), // 7: buildbarn.configuration.blobstore.CompletenessCheckingBlobAccessConfiguration - (*ReadFallbackBlobAccessConfiguration)(nil), // 8: buildbarn.configuration.blobstore.ReadFallbackBlobAccessConfiguration - (*ReferenceExpandingBlobAccessConfiguration)(nil), // 9: buildbarn.configuration.blobstore.ReferenceExpandingBlobAccessConfiguration - (*BlobReplicatorConfiguration)(nil), // 10: buildbarn.configuration.blobstore.BlobReplicatorConfiguration - (*QueuedBlobReplicatorConfiguration)(nil), // 11: buildbarn.configuration.blobstore.QueuedBlobReplicatorConfiguration - (*ConcurrencyLimitingBlobReplicatorConfiguration)(nil), // 12: buildbarn.configuration.blobstore.ConcurrencyLimitingBlobReplicatorConfiguration - (*DemultiplexingBlobAccessConfiguration)(nil), // 13: buildbarn.configuration.blobstore.DemultiplexingBlobAccessConfiguration - (*DemultiplexedBlobAccessConfiguration)(nil), // 14: buildbarn.configuration.blobstore.DemultiplexedBlobAccessConfiguration - (*ActionResultExpiringBlobAccessConfiguration)(nil), // 15: buildbarn.configuration.blobstore.ActionResultExpiringBlobAccessConfiguration - (*ReadCanaryingBlobAccessConfiguration)(nil), // 16: buildbarn.configuration.blobstore.ReadCanaryingBlobAccessConfiguration - (*ZIPBlobAccessConfiguration)(nil), // 17: buildbarn.configuration.blobstore.ZIPBlobAccessConfiguration - (*WithLabelsBlobAccessConfiguration)(nil), // 18: buildbarn.configuration.blobstore.WithLabelsBlobAccessConfiguration - (*DeadlineEnforcingBlobAccess)(nil), // 19: buildbarn.configuration.blobstore.DeadlineEnforcingBlobAccess - (*ShardingBlobAccessConfiguration_Shard)(nil), // 20: buildbarn.configuration.blobstore.ShardingBlobAccessConfiguration.Shard - (*LocalBlobAccessConfiguration_KeyLocationMapInMemory)(nil), // 21: buildbarn.configuration.blobstore.LocalBlobAccessConfiguration.KeyLocationMapInMemory - (*LocalBlobAccessConfiguration_BlocksInMemory)(nil), // 22: buildbarn.configuration.blobstore.LocalBlobAccessConfiguration.BlocksInMemory - (*LocalBlobAccessConfiguration_BlocksOnBlockDevice)(nil), // 23: 
buildbarn.configuration.blobstore.LocalBlobAccessConfiguration.BlocksOnBlockDevice - (*LocalBlobAccessConfiguration_Persistent)(nil), // 24: buildbarn.configuration.blobstore.LocalBlobAccessConfiguration.Persistent - nil, // 25: buildbarn.configuration.blobstore.DemultiplexingBlobAccessConfiguration.InstanceNamePrefixesEntry - nil, // 26: buildbarn.configuration.blobstore.WithLabelsBlobAccessConfiguration.LabelsEntry - (*grpc.ClientConfiguration)(nil), // 27: buildbarn.configuration.grpc.ClientConfiguration - (*status.Status)(nil), // 28: google.rpc.Status - (*blockdevice.Configuration)(nil), // 29: buildbarn.configuration.blockdevice.Configuration - (*digest.ExistenceCacheConfiguration)(nil), // 30: buildbarn.configuration.digest.ExistenceCacheConfiguration - (*aws.SessionConfiguration)(nil), // 31: buildbarn.configuration.cloud.aws.SessionConfiguration - (*http.ClientConfiguration)(nil), // 32: buildbarn.configuration.http.ClientConfiguration - (*gcp.ClientOptionsConfiguration)(nil), // 33: buildbarn.configuration.cloud.gcp.ClientOptionsConfiguration - (*emptypb.Empty)(nil), // 34: google.protobuf.Empty - (*durationpb.Duration)(nil), // 35: google.protobuf.Duration - (*timestamppb.Timestamp)(nil), // 36: google.protobuf.Timestamp + (*BlobstoreConfiguration)(nil), // 0: buildbarn.configuration.blobstore.BlobstoreConfiguration + (*BlobAccessConfiguration)(nil), // 1: buildbarn.configuration.blobstore.BlobAccessConfiguration + (*ReadCachingBlobAccessConfiguration)(nil), // 2: buildbarn.configuration.blobstore.ReadCachingBlobAccessConfiguration + (*ShardingBlobAccessConfiguration)(nil), // 3: buildbarn.configuration.blobstore.ShardingBlobAccessConfiguration + (*MirroredBlobAccessConfiguration)(nil), // 4: buildbarn.configuration.blobstore.MirroredBlobAccessConfiguration + (*LocalBlobAccessConfiguration)(nil), // 5: buildbarn.configuration.blobstore.LocalBlobAccessConfiguration + (*ExistenceCachingBlobAccessConfiguration)(nil), // 6: buildbarn.configuration.blobstore.ExistenceCachingBlobAccessConfiguration + (*CompletenessCheckingBlobAccessConfiguration)(nil), // 7: buildbarn.configuration.blobstore.CompletenessCheckingBlobAccessConfiguration + (*ReadFallbackBlobAccessConfiguration)(nil), // 8: buildbarn.configuration.blobstore.ReadFallbackBlobAccessConfiguration + (*ReferenceExpandingBlobAccessConfiguration)(nil), // 9: buildbarn.configuration.blobstore.ReferenceExpandingBlobAccessConfiguration + (*BlobReplicatorConfiguration)(nil), // 10: buildbarn.configuration.blobstore.BlobReplicatorConfiguration + (*QueuedBlobReplicatorConfiguration)(nil), // 11: buildbarn.configuration.blobstore.QueuedBlobReplicatorConfiguration + (*ConcurrencyLimitingBlobReplicatorConfiguration)(nil), // 12: buildbarn.configuration.blobstore.ConcurrencyLimitingBlobReplicatorConfiguration + (*DemultiplexingBlobAccessConfiguration)(nil), // 13: buildbarn.configuration.blobstore.DemultiplexingBlobAccessConfiguration + (*DemultiplexedBlobAccessConfiguration)(nil), // 14: buildbarn.configuration.blobstore.DemultiplexedBlobAccessConfiguration + (*ActionResultExpiringBlobAccessConfiguration)(nil), // 15: buildbarn.configuration.blobstore.ActionResultExpiringBlobAccessConfiguration + (*ReadCanaryingBlobAccessConfiguration)(nil), // 16: buildbarn.configuration.blobstore.ReadCanaryingBlobAccessConfiguration + (*ZIPBlobAccessConfiguration)(nil), // 17: buildbarn.configuration.blobstore.ZIPBlobAccessConfiguration + (*WithLabelsBlobAccessConfiguration)(nil), // 18: 
buildbarn.configuration.blobstore.WithLabelsBlobAccessConfiguration + (*DeadlineEnforcingBlobAccess)(nil), // 19: buildbarn.configuration.blobstore.DeadlineEnforcingBlobAccess + (*ShardingBlobAccessConfiguration_Shard)(nil), // 20: buildbarn.configuration.blobstore.ShardingBlobAccessConfiguration.Shard + (*ShardingBlobAccessConfiguration_Legacy)(nil), // 21: buildbarn.configuration.blobstore.ShardingBlobAccessConfiguration.Legacy + nil, // 22: buildbarn.configuration.blobstore.ShardingBlobAccessConfiguration.ShardsEntry + (*LocalBlobAccessConfiguration_KeyLocationMapInMemory)(nil), // 23: buildbarn.configuration.blobstore.LocalBlobAccessConfiguration.KeyLocationMapInMemory + (*LocalBlobAccessConfiguration_BlocksInMemory)(nil), // 24: buildbarn.configuration.blobstore.LocalBlobAccessConfiguration.BlocksInMemory + (*LocalBlobAccessConfiguration_BlocksOnBlockDevice)(nil), // 25: buildbarn.configuration.blobstore.LocalBlobAccessConfiguration.BlocksOnBlockDevice + (*LocalBlobAccessConfiguration_Persistent)(nil), // 26: buildbarn.configuration.blobstore.LocalBlobAccessConfiguration.Persistent + nil, // 27: buildbarn.configuration.blobstore.DemultiplexingBlobAccessConfiguration.InstanceNamePrefixesEntry + nil, // 28: buildbarn.configuration.blobstore.WithLabelsBlobAccessConfiguration.LabelsEntry + (*grpc.ClientConfiguration)(nil), // 29: buildbarn.configuration.grpc.ClientConfiguration + (*status.Status)(nil), // 30: google.rpc.Status + (*blockdevice.Configuration)(nil), // 31: buildbarn.configuration.blockdevice.Configuration + (*digest.ExistenceCacheConfiguration)(nil), // 32: buildbarn.configuration.digest.ExistenceCacheConfiguration + (*aws.SessionConfiguration)(nil), // 33: buildbarn.configuration.cloud.aws.SessionConfiguration + (*http.ClientConfiguration)(nil), // 34: buildbarn.configuration.http.ClientConfiguration + (*gcp.ClientOptionsConfiguration)(nil), // 35: buildbarn.configuration.cloud.gcp.ClientOptionsConfiguration + (*emptypb.Empty)(nil), // 36: google.protobuf.Empty + (*durationpb.Duration)(nil), // 37: google.protobuf.Duration + (*timestamppb.Timestamp)(nil), // 38: google.protobuf.Timestamp } var file_pkg_proto_configuration_blobstore_blobstore_proto_depIdxs = []int32{ 1, // 0: buildbarn.configuration.blobstore.BlobstoreConfiguration.content_addressable_storage:type_name -> buildbarn.configuration.blobstore.BlobAccessConfiguration 1, // 1: buildbarn.configuration.blobstore.BlobstoreConfiguration.action_cache:type_name -> buildbarn.configuration.blobstore.BlobAccessConfiguration 2, // 2: buildbarn.configuration.blobstore.BlobAccessConfiguration.read_caching:type_name -> buildbarn.configuration.blobstore.ReadCachingBlobAccessConfiguration - 27, // 3: buildbarn.configuration.blobstore.BlobAccessConfiguration.grpc:type_name -> buildbarn.configuration.grpc.ClientConfiguration - 28, // 4: buildbarn.configuration.blobstore.BlobAccessConfiguration.error:type_name -> google.rpc.Status + 29, // 3: buildbarn.configuration.blobstore.BlobAccessConfiguration.grpc:type_name -> buildbarn.configuration.grpc.ClientConfiguration + 30, // 4: buildbarn.configuration.blobstore.BlobAccessConfiguration.error:type_name -> google.rpc.Status 3, // 5: buildbarn.configuration.blobstore.BlobAccessConfiguration.sharding:type_name -> buildbarn.configuration.blobstore.ShardingBlobAccessConfiguration 4, // 6: buildbarn.configuration.blobstore.BlobAccessConfiguration.mirrored:type_name -> buildbarn.configuration.blobstore.MirroredBlobAccessConfiguration 5, // 7: 
buildbarn.configuration.blobstore.BlobAccessConfiguration.local:type_name -> buildbarn.configuration.blobstore.LocalBlobAccessConfiguration @@ -2619,61 +2691,63 @@ var file_pkg_proto_configuration_blobstore_blobstore_proto_depIdxs = []int32{ 1, // 20: buildbarn.configuration.blobstore.ReadCachingBlobAccessConfiguration.slow:type_name -> buildbarn.configuration.blobstore.BlobAccessConfiguration 1, // 21: buildbarn.configuration.blobstore.ReadCachingBlobAccessConfiguration.fast:type_name -> buildbarn.configuration.blobstore.BlobAccessConfiguration 10, // 22: buildbarn.configuration.blobstore.ReadCachingBlobAccessConfiguration.replicator:type_name -> buildbarn.configuration.blobstore.BlobReplicatorConfiguration - 20, // 23: buildbarn.configuration.blobstore.ShardingBlobAccessConfiguration.shards:type_name -> buildbarn.configuration.blobstore.ShardingBlobAccessConfiguration.Shard - 1, // 24: buildbarn.configuration.blobstore.MirroredBlobAccessConfiguration.backend_a:type_name -> buildbarn.configuration.blobstore.BlobAccessConfiguration - 1, // 25: buildbarn.configuration.blobstore.MirroredBlobAccessConfiguration.backend_b:type_name -> buildbarn.configuration.blobstore.BlobAccessConfiguration - 10, // 26: buildbarn.configuration.blobstore.MirroredBlobAccessConfiguration.replicator_a_to_b:type_name -> buildbarn.configuration.blobstore.BlobReplicatorConfiguration - 10, // 27: buildbarn.configuration.blobstore.MirroredBlobAccessConfiguration.replicator_b_to_a:type_name -> buildbarn.configuration.blobstore.BlobReplicatorConfiguration - 21, // 28: buildbarn.configuration.blobstore.LocalBlobAccessConfiguration.key_location_map_in_memory:type_name -> buildbarn.configuration.blobstore.LocalBlobAccessConfiguration.KeyLocationMapInMemory - 29, // 29: buildbarn.configuration.blobstore.LocalBlobAccessConfiguration.key_location_map_on_block_device:type_name -> buildbarn.configuration.blockdevice.Configuration - 22, // 30: buildbarn.configuration.blobstore.LocalBlobAccessConfiguration.blocks_in_memory:type_name -> buildbarn.configuration.blobstore.LocalBlobAccessConfiguration.BlocksInMemory - 23, // 31: buildbarn.configuration.blobstore.LocalBlobAccessConfiguration.blocks_on_block_device:type_name -> buildbarn.configuration.blobstore.LocalBlobAccessConfiguration.BlocksOnBlockDevice - 24, // 32: buildbarn.configuration.blobstore.LocalBlobAccessConfiguration.persistent:type_name -> buildbarn.configuration.blobstore.LocalBlobAccessConfiguration.Persistent - 1, // 33: buildbarn.configuration.blobstore.ExistenceCachingBlobAccessConfiguration.backend:type_name -> buildbarn.configuration.blobstore.BlobAccessConfiguration - 30, // 34: buildbarn.configuration.blobstore.ExistenceCachingBlobAccessConfiguration.existence_cache:type_name -> buildbarn.configuration.digest.ExistenceCacheConfiguration - 1, // 35: buildbarn.configuration.blobstore.CompletenessCheckingBlobAccessConfiguration.backend:type_name -> buildbarn.configuration.blobstore.BlobAccessConfiguration - 1, // 36: buildbarn.configuration.blobstore.ReadFallbackBlobAccessConfiguration.primary:type_name -> buildbarn.configuration.blobstore.BlobAccessConfiguration - 1, // 37: buildbarn.configuration.blobstore.ReadFallbackBlobAccessConfiguration.secondary:type_name -> buildbarn.configuration.blobstore.BlobAccessConfiguration - 10, // 38: buildbarn.configuration.blobstore.ReadFallbackBlobAccessConfiguration.replicator:type_name -> buildbarn.configuration.blobstore.BlobReplicatorConfiguration - 1, // 39: 
buildbarn.configuration.blobstore.ReferenceExpandingBlobAccessConfiguration.indirect_content_addressable_storage:type_name -> buildbarn.configuration.blobstore.BlobAccessConfiguration - 31, // 40: buildbarn.configuration.blobstore.ReferenceExpandingBlobAccessConfiguration.aws_session:type_name -> buildbarn.configuration.cloud.aws.SessionConfiguration - 32, // 41: buildbarn.configuration.blobstore.ReferenceExpandingBlobAccessConfiguration.http_client:type_name -> buildbarn.configuration.http.ClientConfiguration - 33, // 42: buildbarn.configuration.blobstore.ReferenceExpandingBlobAccessConfiguration.gcp_client_options:type_name -> buildbarn.configuration.cloud.gcp.ClientOptionsConfiguration - 1, // 43: buildbarn.configuration.blobstore.ReferenceExpandingBlobAccessConfiguration.content_addressable_storage:type_name -> buildbarn.configuration.blobstore.BlobAccessConfiguration - 34, // 44: buildbarn.configuration.blobstore.BlobReplicatorConfiguration.local:type_name -> google.protobuf.Empty - 27, // 45: buildbarn.configuration.blobstore.BlobReplicatorConfiguration.remote:type_name -> buildbarn.configuration.grpc.ClientConfiguration - 11, // 46: buildbarn.configuration.blobstore.BlobReplicatorConfiguration.queued:type_name -> buildbarn.configuration.blobstore.QueuedBlobReplicatorConfiguration - 34, // 47: buildbarn.configuration.blobstore.BlobReplicatorConfiguration.noop:type_name -> google.protobuf.Empty - 10, // 48: buildbarn.configuration.blobstore.BlobReplicatorConfiguration.deduplicating:type_name -> buildbarn.configuration.blobstore.BlobReplicatorConfiguration - 12, // 49: buildbarn.configuration.blobstore.BlobReplicatorConfiguration.concurrency_limiting:type_name -> buildbarn.configuration.blobstore.ConcurrencyLimitingBlobReplicatorConfiguration - 10, // 50: buildbarn.configuration.blobstore.QueuedBlobReplicatorConfiguration.base:type_name -> buildbarn.configuration.blobstore.BlobReplicatorConfiguration - 30, // 51: buildbarn.configuration.blobstore.QueuedBlobReplicatorConfiguration.existence_cache:type_name -> buildbarn.configuration.digest.ExistenceCacheConfiguration - 10, // 52: buildbarn.configuration.blobstore.ConcurrencyLimitingBlobReplicatorConfiguration.base:type_name -> buildbarn.configuration.blobstore.BlobReplicatorConfiguration - 25, // 53: buildbarn.configuration.blobstore.DemultiplexingBlobAccessConfiguration.instance_name_prefixes:type_name -> buildbarn.configuration.blobstore.DemultiplexingBlobAccessConfiguration.InstanceNamePrefixesEntry - 1, // 54: buildbarn.configuration.blobstore.DemultiplexedBlobAccessConfiguration.backend:type_name -> buildbarn.configuration.blobstore.BlobAccessConfiguration - 1, // 55: buildbarn.configuration.blobstore.ActionResultExpiringBlobAccessConfiguration.backend:type_name -> buildbarn.configuration.blobstore.BlobAccessConfiguration - 35, // 56: buildbarn.configuration.blobstore.ActionResultExpiringBlobAccessConfiguration.minimum_validity:type_name -> google.protobuf.Duration - 35, // 57: buildbarn.configuration.blobstore.ActionResultExpiringBlobAccessConfiguration.maximum_validity_jitter:type_name -> google.protobuf.Duration - 36, // 58: buildbarn.configuration.blobstore.ActionResultExpiringBlobAccessConfiguration.minimum_timestamp:type_name -> google.protobuf.Timestamp - 1, // 59: buildbarn.configuration.blobstore.ReadCanaryingBlobAccessConfiguration.source:type_name -> buildbarn.configuration.blobstore.BlobAccessConfiguration - 1, // 60: buildbarn.configuration.blobstore.ReadCanaryingBlobAccessConfiguration.replica:type_name -> 
buildbarn.configuration.blobstore.BlobAccessConfiguration - 35, // 61: buildbarn.configuration.blobstore.ReadCanaryingBlobAccessConfiguration.maximum_cache_duration:type_name -> google.protobuf.Duration - 30, // 62: buildbarn.configuration.blobstore.ZIPBlobAccessConfiguration.data_integrity_validation_cache:type_name -> buildbarn.configuration.digest.ExistenceCacheConfiguration - 1, // 63: buildbarn.configuration.blobstore.WithLabelsBlobAccessConfiguration.backend:type_name -> buildbarn.configuration.blobstore.BlobAccessConfiguration - 26, // 64: buildbarn.configuration.blobstore.WithLabelsBlobAccessConfiguration.labels:type_name -> buildbarn.configuration.blobstore.WithLabelsBlobAccessConfiguration.LabelsEntry - 35, // 65: buildbarn.configuration.blobstore.DeadlineEnforcingBlobAccess.timeout:type_name -> google.protobuf.Duration - 1, // 66: buildbarn.configuration.blobstore.DeadlineEnforcingBlobAccess.backend:type_name -> buildbarn.configuration.blobstore.BlobAccessConfiguration - 1, // 67: buildbarn.configuration.blobstore.ShardingBlobAccessConfiguration.Shard.backend:type_name -> buildbarn.configuration.blobstore.BlobAccessConfiguration - 29, // 68: buildbarn.configuration.blobstore.LocalBlobAccessConfiguration.BlocksOnBlockDevice.source:type_name -> buildbarn.configuration.blockdevice.Configuration - 30, // 69: buildbarn.configuration.blobstore.LocalBlobAccessConfiguration.BlocksOnBlockDevice.data_integrity_validation_cache:type_name -> buildbarn.configuration.digest.ExistenceCacheConfiguration - 35, // 70: buildbarn.configuration.blobstore.LocalBlobAccessConfiguration.Persistent.minimum_epoch_interval:type_name -> google.protobuf.Duration - 14, // 71: buildbarn.configuration.blobstore.DemultiplexingBlobAccessConfiguration.InstanceNamePrefixesEntry.value:type_name -> buildbarn.configuration.blobstore.DemultiplexedBlobAccessConfiguration - 1, // 72: buildbarn.configuration.blobstore.WithLabelsBlobAccessConfiguration.LabelsEntry.value:type_name -> buildbarn.configuration.blobstore.BlobAccessConfiguration - 73, // [73:73] is the sub-list for method output_type - 73, // [73:73] is the sub-list for method input_type - 73, // [73:73] is the sub-list for extension type_name - 73, // [73:73] is the sub-list for extension extendee - 0, // [0:73] is the sub-list for field type_name + 22, // 23: buildbarn.configuration.blobstore.ShardingBlobAccessConfiguration.shards:type_name -> buildbarn.configuration.blobstore.ShardingBlobAccessConfiguration.ShardsEntry + 21, // 24: buildbarn.configuration.blobstore.ShardingBlobAccessConfiguration.legacy:type_name -> buildbarn.configuration.blobstore.ShardingBlobAccessConfiguration.Legacy + 1, // 25: buildbarn.configuration.blobstore.MirroredBlobAccessConfiguration.backend_a:type_name -> buildbarn.configuration.blobstore.BlobAccessConfiguration + 1, // 26: buildbarn.configuration.blobstore.MirroredBlobAccessConfiguration.backend_b:type_name -> buildbarn.configuration.blobstore.BlobAccessConfiguration + 10, // 27: buildbarn.configuration.blobstore.MirroredBlobAccessConfiguration.replicator_a_to_b:type_name -> buildbarn.configuration.blobstore.BlobReplicatorConfiguration + 10, // 28: buildbarn.configuration.blobstore.MirroredBlobAccessConfiguration.replicator_b_to_a:type_name -> buildbarn.configuration.blobstore.BlobReplicatorConfiguration + 23, // 29: buildbarn.configuration.blobstore.LocalBlobAccessConfiguration.key_location_map_in_memory:type_name -> buildbarn.configuration.blobstore.LocalBlobAccessConfiguration.KeyLocationMapInMemory + 31, // 30: 
buildbarn.configuration.blobstore.LocalBlobAccessConfiguration.key_location_map_on_block_device:type_name -> buildbarn.configuration.blockdevice.Configuration + 24, // 31: buildbarn.configuration.blobstore.LocalBlobAccessConfiguration.blocks_in_memory:type_name -> buildbarn.configuration.blobstore.LocalBlobAccessConfiguration.BlocksInMemory + 25, // 32: buildbarn.configuration.blobstore.LocalBlobAccessConfiguration.blocks_on_block_device:type_name -> buildbarn.configuration.blobstore.LocalBlobAccessConfiguration.BlocksOnBlockDevice + 26, // 33: buildbarn.configuration.blobstore.LocalBlobAccessConfiguration.persistent:type_name -> buildbarn.configuration.blobstore.LocalBlobAccessConfiguration.Persistent + 1, // 34: buildbarn.configuration.blobstore.ExistenceCachingBlobAccessConfiguration.backend:type_name -> buildbarn.configuration.blobstore.BlobAccessConfiguration + 32, // 35: buildbarn.configuration.blobstore.ExistenceCachingBlobAccessConfiguration.existence_cache:type_name -> buildbarn.configuration.digest.ExistenceCacheConfiguration + 1, // 36: buildbarn.configuration.blobstore.CompletenessCheckingBlobAccessConfiguration.backend:type_name -> buildbarn.configuration.blobstore.BlobAccessConfiguration + 1, // 37: buildbarn.configuration.blobstore.ReadFallbackBlobAccessConfiguration.primary:type_name -> buildbarn.configuration.blobstore.BlobAccessConfiguration + 1, // 38: buildbarn.configuration.blobstore.ReadFallbackBlobAccessConfiguration.secondary:type_name -> buildbarn.configuration.blobstore.BlobAccessConfiguration + 10, // 39: buildbarn.configuration.blobstore.ReadFallbackBlobAccessConfiguration.replicator:type_name -> buildbarn.configuration.blobstore.BlobReplicatorConfiguration + 1, // 40: buildbarn.configuration.blobstore.ReferenceExpandingBlobAccessConfiguration.indirect_content_addressable_storage:type_name -> buildbarn.configuration.blobstore.BlobAccessConfiguration + 33, // 41: buildbarn.configuration.blobstore.ReferenceExpandingBlobAccessConfiguration.aws_session:type_name -> buildbarn.configuration.cloud.aws.SessionConfiguration + 34, // 42: buildbarn.configuration.blobstore.ReferenceExpandingBlobAccessConfiguration.http_client:type_name -> buildbarn.configuration.http.ClientConfiguration + 35, // 43: buildbarn.configuration.blobstore.ReferenceExpandingBlobAccessConfiguration.gcp_client_options:type_name -> buildbarn.configuration.cloud.gcp.ClientOptionsConfiguration + 1, // 44: buildbarn.configuration.blobstore.ReferenceExpandingBlobAccessConfiguration.content_addressable_storage:type_name -> buildbarn.configuration.blobstore.BlobAccessConfiguration + 36, // 45: buildbarn.configuration.blobstore.BlobReplicatorConfiguration.local:type_name -> google.protobuf.Empty + 29, // 46: buildbarn.configuration.blobstore.BlobReplicatorConfiguration.remote:type_name -> buildbarn.configuration.grpc.ClientConfiguration + 11, // 47: buildbarn.configuration.blobstore.BlobReplicatorConfiguration.queued:type_name -> buildbarn.configuration.blobstore.QueuedBlobReplicatorConfiguration + 36, // 48: buildbarn.configuration.blobstore.BlobReplicatorConfiguration.noop:type_name -> google.protobuf.Empty + 10, // 49: buildbarn.configuration.blobstore.BlobReplicatorConfiguration.deduplicating:type_name -> buildbarn.configuration.blobstore.BlobReplicatorConfiguration + 12, // 50: buildbarn.configuration.blobstore.BlobReplicatorConfiguration.concurrency_limiting:type_name -> buildbarn.configuration.blobstore.ConcurrencyLimitingBlobReplicatorConfiguration + 10, // 51: 
buildbarn.configuration.blobstore.QueuedBlobReplicatorConfiguration.base:type_name -> buildbarn.configuration.blobstore.BlobReplicatorConfiguration + 32, // 52: buildbarn.configuration.blobstore.QueuedBlobReplicatorConfiguration.existence_cache:type_name -> buildbarn.configuration.digest.ExistenceCacheConfiguration + 10, // 53: buildbarn.configuration.blobstore.ConcurrencyLimitingBlobReplicatorConfiguration.base:type_name -> buildbarn.configuration.blobstore.BlobReplicatorConfiguration + 27, // 54: buildbarn.configuration.blobstore.DemultiplexingBlobAccessConfiguration.instance_name_prefixes:type_name -> buildbarn.configuration.blobstore.DemultiplexingBlobAccessConfiguration.InstanceNamePrefixesEntry + 1, // 55: buildbarn.configuration.blobstore.DemultiplexedBlobAccessConfiguration.backend:type_name -> buildbarn.configuration.blobstore.BlobAccessConfiguration + 1, // 56: buildbarn.configuration.blobstore.ActionResultExpiringBlobAccessConfiguration.backend:type_name -> buildbarn.configuration.blobstore.BlobAccessConfiguration + 37, // 57: buildbarn.configuration.blobstore.ActionResultExpiringBlobAccessConfiguration.minimum_validity:type_name -> google.protobuf.Duration + 37, // 58: buildbarn.configuration.blobstore.ActionResultExpiringBlobAccessConfiguration.maximum_validity_jitter:type_name -> google.protobuf.Duration + 38, // 59: buildbarn.configuration.blobstore.ActionResultExpiringBlobAccessConfiguration.minimum_timestamp:type_name -> google.protobuf.Timestamp + 1, // 60: buildbarn.configuration.blobstore.ReadCanaryingBlobAccessConfiguration.source:type_name -> buildbarn.configuration.blobstore.BlobAccessConfiguration + 1, // 61: buildbarn.configuration.blobstore.ReadCanaryingBlobAccessConfiguration.replica:type_name -> buildbarn.configuration.blobstore.BlobAccessConfiguration + 37, // 62: buildbarn.configuration.blobstore.ReadCanaryingBlobAccessConfiguration.maximum_cache_duration:type_name -> google.protobuf.Duration + 32, // 63: buildbarn.configuration.blobstore.ZIPBlobAccessConfiguration.data_integrity_validation_cache:type_name -> buildbarn.configuration.digest.ExistenceCacheConfiguration + 1, // 64: buildbarn.configuration.blobstore.WithLabelsBlobAccessConfiguration.backend:type_name -> buildbarn.configuration.blobstore.BlobAccessConfiguration + 28, // 65: buildbarn.configuration.blobstore.WithLabelsBlobAccessConfiguration.labels:type_name -> buildbarn.configuration.blobstore.WithLabelsBlobAccessConfiguration.LabelsEntry + 37, // 66: buildbarn.configuration.blobstore.DeadlineEnforcingBlobAccess.timeout:type_name -> google.protobuf.Duration + 1, // 67: buildbarn.configuration.blobstore.DeadlineEnforcingBlobAccess.backend:type_name -> buildbarn.configuration.blobstore.BlobAccessConfiguration + 1, // 68: buildbarn.configuration.blobstore.ShardingBlobAccessConfiguration.Shard.backend:type_name -> buildbarn.configuration.blobstore.BlobAccessConfiguration + 20, // 69: buildbarn.configuration.blobstore.ShardingBlobAccessConfiguration.ShardsEntry.value:type_name -> buildbarn.configuration.blobstore.ShardingBlobAccessConfiguration.Shard + 31, // 70: buildbarn.configuration.blobstore.LocalBlobAccessConfiguration.BlocksOnBlockDevice.source:type_name -> buildbarn.configuration.blockdevice.Configuration + 32, // 71: buildbarn.configuration.blobstore.LocalBlobAccessConfiguration.BlocksOnBlockDevice.data_integrity_validation_cache:type_name -> buildbarn.configuration.digest.ExistenceCacheConfiguration + 37, // 72: 
buildbarn.configuration.blobstore.LocalBlobAccessConfiguration.Persistent.minimum_epoch_interval:type_name -> google.protobuf.Duration + 14, // 73: buildbarn.configuration.blobstore.DemultiplexingBlobAccessConfiguration.InstanceNamePrefixesEntry.value:type_name -> buildbarn.configuration.blobstore.DemultiplexedBlobAccessConfiguration + 1, // 74: buildbarn.configuration.blobstore.WithLabelsBlobAccessConfiguration.LabelsEntry.value:type_name -> buildbarn.configuration.blobstore.BlobAccessConfiguration + 75, // [75:75] is the sub-list for method output_type + 75, // [75:75] is the sub-list for method input_type + 75, // [75:75] is the sub-list for extension type_name + 75, // [75:75] is the sub-list for extension extendee + 0, // [0:75] is the sub-list for field type_name } func init() { file_pkg_proto_configuration_blobstore_blobstore_proto_init() } @@ -2722,7 +2796,7 @@ func file_pkg_proto_configuration_blobstore_blobstore_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_pkg_proto_configuration_blobstore_blobstore_proto_rawDesc), len(file_pkg_proto_configuration_blobstore_blobstore_proto_rawDesc)), NumEnums: 0, - NumMessages: 27, + NumMessages: 29, NumExtensions: 0, NumServices: 0, }, diff --git a/pkg/proto/configuration/blobstore/blobstore.proto b/pkg/proto/configuration/blobstore/blobstore.proto index 4946750a8..8f3409886 100644 --- a/pkg/proto/configuration/blobstore/blobstore.proto +++ b/pkg/proto/configuration/blobstore/blobstore.proto @@ -281,22 +281,36 @@ message ShardingBlobAccessConfiguration { uint32 weight = 2; } - // Initialization for the hashing algorithm used to partition the - // key space. This should be a random 64-bit value that is unique to - // this deployment. Failure to do so may result in poor distribution - // in case sharding is nested. - // - // Changing this value will in effect cause a full repartitioning of - // the data. - uint64 hash_initialization = 1; - - // Shards to which requests are routed. To reduce the need for full - // repartitioning of the data when growing a cluster, it's possible - // to terminate this list with a drained backend that increases the - // total weight up to a given number. Newly added backends may - // allocate their weight from this backend, thereby causing most of - // the keyspace to still be routed to its original backend. - repeated Shard shards = 2; + message Legacy { + // Order of the shards for the legacy schema. Each entry refers to a + // corresponding key in the 'shards' map, or null for a drained backend. + repeated string shard_order = 1; + // Hash initialization seed used for the legacy schema. + uint64 hash_initialization = 2; + } + + // Was 'hash_initialization', the seed for hashing over the shards array. It + // has been made redundant by the 'shards' map; to run in a backwards + // compatible legacy mode, set 'hash_initialization' in 'legacy'. + reserved 1; + + // Shards are identified by a key within the context of this sharding + // configuration. The key is a freeform string which describes the identity + // of the shard. Shards are chosen via rendezvous hashing based on the blob + // digest and each shard's key and weight. + // + // When removing a shard from the map it is guaranteed that only blobs + // which resolved to the removed shard will get a different shard. When + // adding shards, any given blob has a weight/total_weight probability of + // resolving to one of the new shards.
+ map<string, Shard> shards = 2; + + // A temporary legacy mode which allows clients to keep using storage + // backends that are sharded with the old sharding topology implementation. + // Consumers are expected to migrate in a timely fashion; support for the + // legacy schema will be removed by 2025-12-31. + Legacy legacy = 3; } message MirroredBlobAccessConfiguration {
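
For illustration, weighted rendezvous (highest-random-weight) selection with the properties described in the configuration comment above can be sketched in a few lines of Go. This is only a sketch under stated assumptions, not the rendezvous shard selector added by this change: the shardEntry and pickShard names, the FNV-1a mixing, and the -weight/ln(u) score are choices made for the example.

package main

import (
	"encoding/binary"
	"fmt"
	"hash/fnv"
	"math"
)

// shardEntry is a hypothetical stand-in for one entry of the 'shards' map:
// the freeform key plus the configured weight.
type shardEntry struct {
	key    string
	weight uint32
}

// pickShard returns the key of the shard that wins rendezvous hashing for a
// blob. Every shard gets a pseudo-random score derived from (shard key, blob
// hash), scaled by the shard's weight; the highest score wins.
func pickShard(shards []shardEntry, blobHash uint64) string {
	bestKey := ""
	bestScore := math.Inf(-1)
	for _, s := range shards {
		// Mix the shard key and the blob hash into a single 64-bit value.
		h := fnv.New64a()
		h.Write([]byte(s.key))
		var buf [8]byte
		binary.LittleEndian.PutUint64(buf[:], blobHash)
		h.Write(buf[:])
		// Map the top 53 bits to a uniform float in (0, 1) and apply the
		// logarithm trick for weighted rendezvous hashing.
		u := (float64(h.Sum64()>>11) + 0.5) / float64(1<<53)
		score := -float64(s.weight) / math.Log(u)
		if score > bestScore {
			bestKey, bestScore = s.key, score
		}
	}
	return bestKey
}

func main() {
	shards := []shardEntry{
		{key: "shard0", weight: 1},
		{key: "shard1", weight: 1},
		{key: "shard2", weight: 2}, // covers roughly twice the keyspace of the others
	}
	// The 64-bit value stands in for the digest-derived hash that the real
	// selector computes for a blob.
	fmt.Println(pickShard(shards, 0x8b1a9953c4611296))
}

Because each shard's score depends only on its own key and the blob, removing a shard never changes the winner among the remaining shards, and a newly added shard takes over roughly weight/total_weight of the keyspace; that is the guarantee the configuration comment relies on.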