From 4035c7c6ff6e7aa8b4e33f5b13bbc044cf869d1a Mon Sep 17 00:00:00 2001 From: yrizhkov Date: Fri, 17 Oct 2025 10:56:54 +0300 Subject: [PATCH] Enhance error handling and messages --- config_partition_filter.go | 6 ++-- config_restore.go | 4 +-- io/aerospike/sindex_writer.go | 54 ++++++++++++++++----------------- io/aerospike/udf_writer.go | 2 +- io/storage/aws/s3/reader.go | 1 + io/storage/aws/s3/writer.go | 10 +++--- io/storage/azure/blob/reader.go | 5 +-- pkg/asinfo/client.go | 2 +- 8 files changed, 42 insertions(+), 42 deletions(-) diff --git a/config_partition_filter.go b/config_partition_filter.go index b78783f3d..2d9324992 100644 --- a/config_partition_filter.go +++ b/config_partition_filter.go @@ -254,10 +254,10 @@ func ParsePartitionFilterListString(namespace, filters string) ([]*a.PartitionFi filterSlice := strings.Split(filters, ",") partitionFilters := make([]*a.PartitionFilter, 0, len(filterSlice)) - for i := range filterSlice { - partitionFilter, err := ParsePartitionFilterString(namespace, filterSlice[i]) + for _, filter := range filterSlice { + partitionFilter, err := ParsePartitionFilterString(namespace, filter) if err != nil { - return nil, fmt.Errorf("failed to parse partition filter, filter: %s, err: %v", filterSlice[i], err) + return nil, fmt.Errorf("failed to parse partition filter, filter: %s, err: %w", filter, err) } partitionFilters = append(partitionFilters, partitionFilter) diff --git a/config_restore.go b/config_restore.go index 935a68941..0eee2c4dd 100644 --- a/config_restore.go +++ b/config_restore.go @@ -28,7 +28,7 @@ type ConfigRestore struct { WritePolicy *a.WritePolicy // Namespace details for the restore operation. // By default, the data is restored to the namespace from which it was taken. - Namespace *RestoreNamespaceConfig `json:"namespace,omitempty"` + Namespace *RestoreNamespaceConfig // Encryption details. EncryptionPolicy *EncryptionPolicy // Compression details. @@ -89,7 +89,7 @@ type ConfigRestore struct { MetricsEnabled bool // ValidateOnly indicates whether restore should only validate the backup files. ValidateOnly bool - // ApplyMetadataLast defines when to restore metadata (Sindexes and UDFs). + // ApplyMetadataLast defines when to restore metadata (secondary indexes and UDFs). // If set to true, metadata will be restored after all records have been processed. ApplyMetadataLast bool // IgnoreUnknownFields indicates whether restore should ignore unknown to decoder fields. diff --git a/io/aerospike/sindex_writer.go b/io/aerospike/sindex_writer.go index 66f0d1f27..355e37317 100644 --- a/io/aerospike/sindex_writer.go +++ b/io/aerospike/sindex_writer.go @@ -31,32 +31,32 @@ type sindexWriter struct { // writeSecondaryIndex writes a secondary index to Aerospike. 
func (rw sindexWriter) writeSecondaryIndex(si *models.SIndex) error { - var sindexType a.IndexType + var sIndexType a.IndexType switch si.Path.BinType { case models.NumericSIDataType: - sindexType = a.NUMERIC + sIndexType = a.NUMERIC case models.StringSIDataType: - sindexType = a.STRING + sIndexType = a.STRING case models.BlobSIDataType: - sindexType = a.BLOB + sIndexType = a.BLOB case models.GEO2DSphereSIDataType: - sindexType = a.GEO2DSPHERE + sIndexType = a.GEO2DSPHERE default: return fmt.Errorf("invalid sindex bin type: %c", si.Path.BinType) } - var sindexCollectionType a.IndexCollectionType + var sIndexCollectionType a.IndexCollectionType switch si.IndexType { case models.BinSIndex: - sindexCollectionType = a.ICT_DEFAULT + sIndexCollectionType = a.ICT_DEFAULT case models.ListElementSIndex: - sindexCollectionType = a.ICT_LIST + sIndexCollectionType = a.ICT_LIST case models.MapKeySIndex: - sindexCollectionType = a.ICT_MAPKEYS + sIndexCollectionType = a.ICT_MAPKEYS case models.MapValueSIndex: - sindexCollectionType = a.ICT_MAPVALUES + sIndexCollectionType = a.ICT_MAPVALUES default: return fmt.Errorf("invalid sindex collection type: %c", si.IndexType) } @@ -84,33 +84,33 @@ func (rw sindexWriter) writeSecondaryIndex(si *models.SIndex) error { job, aErr := rw.createIndex( rw.writePolicy, si, - sindexType, - sindexCollectionType, + sIndexType, + sIndexCollectionType, exp, ctx..., ) - if err != nil { + if aErr != nil { if aErr.Matches(atypes.INDEX_FOUND) { - rw.logger.Debug("index already exists, replacing it", "sindex", si.Name) + rw.logger.Debug("secondary index already exists, replacing it", "name", si.Name) - err = rw.asc.DropIndex(rw.writePolicy, si.Namespace, si.Set, si.Name) + err := rw.asc.DropIndex(rw.writePolicy, si.Namespace, si.Set, si.Name) if err != nil { return fmt.Errorf("error dropping sindex %s: %w", si.Name, err) } - job, aErr = rw.createIndex( + job, err = rw.createIndex( rw.writePolicy, si, - sindexType, - sindexCollectionType, + sIndexType, + sIndexCollectionType, exp, ctx..., ) - if aErr != nil { + if err != nil { return fmt.Errorf("error creating replacement sindex %s: %w", si.Name, err) } } else { - return fmt.Errorf("error creating sindex %s: %w", si.Name, err) + return fmt.Errorf("error creating sindex %s: %w", si.Name, aErr) } } @@ -128,7 +128,7 @@ func (rw sindexWriter) writeSecondaryIndex(si *models.SIndex) error { return fmt.Errorf("error creating sindex %s: %w", si.Name, err) } - rw.logger.Debug("created sindex", "sindex", si.Name) + rw.logger.Debug("created secondary index", slog.String("name", si.Name)) return nil } @@ -136,8 +136,8 @@ func (rw sindexWriter) writeSecondaryIndex(si *models.SIndex) error { func (rw sindexWriter) createIndex( wp *a.WritePolicy, si *models.SIndex, - sindexType a.IndexType, - sindexCollectionType a.IndexCollectionType, + sIndexType a.IndexType, + sIndexCollectionType a.IndexCollectionType, exp *a.Expression, ctx ...*a.CDTContext, ) (*a.IndexTask, a.Error) { @@ -147,8 +147,8 @@ func (rw sindexWriter) createIndex( si.Namespace, si.Set, si.Name, - sindexType, - sindexCollectionType, + sIndexType, + sIndexCollectionType, exp, ) } @@ -159,8 +159,8 @@ func (rw sindexWriter) createIndex( si.Set, si.Name, si.Path.BinName, - sindexType, - sindexCollectionType, + sIndexType, + sIndexCollectionType, ctx..., ) } diff --git a/io/aerospike/udf_writer.go b/io/aerospike/udf_writer.go index 235174fc7..12fcf8326 100644 --- a/io/aerospike/udf_writer.go +++ b/io/aerospike/udf_writer.go @@ -58,7 +58,7 @@ func (rw udfWriter) writeUDF(udf *models.UDF) 
error { return fmt.Errorf("error registering UDF %s: %w", udf.Name, err) } - rw.logger.Debug("registered UDF", "udf", udf.Name) + rw.logger.Debug("registered UDF", slog.String("name", udf.Name)) return nil } diff --git a/io/storage/aws/s3/reader.go b/io/storage/aws/s3/reader.go index 5d6aeb186..affe9d7d8 100644 --- a/io/storage/aws/s3/reader.go +++ b/io/storage/aws/s3/reader.go @@ -93,6 +93,7 @@ func NewReader( // Set default val. r.PollWarmDuration = common.DefaultPollWarmDuration + // Discard handler. r.Logger = slog.New(slog.NewTextHandler(nil, &slog.HandlerOptions{Level: slog.Level(1024)})) for _, opt := range opts { diff --git a/io/storage/aws/s3/writer.go b/io/storage/aws/s3/writer.go index 5b582f1da..a1ad2c460 100644 --- a/io/storage/aws/s3/writer.go +++ b/io/storage/aws/s3/writer.go @@ -191,9 +191,8 @@ func (w *s3Writer) Write(p []byte) (int, error) { } if w.buffer.Len() >= w.chunkSize { - err := w.uploadPart() - if err != nil { - return 0, fmt.Errorf("failed to upload part: %w", err) + if err := w.uploadPart(); err != nil { + return 0, err } } @@ -231,9 +230,8 @@ func (w *s3Writer) Close() error { } if w.buffer.Len() > 0 { - err := w.uploadPart() - if err != nil { - return fmt.Errorf("failed to upload part: %w", err) + if err := w.uploadPart(); err != nil { + return err } } diff --git a/io/storage/azure/blob/reader.go b/io/storage/azure/blob/reader.go index 575eac045..05b50e103 100644 --- a/io/storage/azure/blob/reader.go +++ b/io/storage/azure/blob/reader.go @@ -85,6 +85,7 @@ func NewReader( // Set default val. r.PollWarmDuration = common.DefaultPollWarmDuration + // Discard handler. r.Logger = slog.New(slog.NewTextHandler(nil, &slog.HandlerOptions{Level: slog.Level(1024)})) for _, opt := range opts { @@ -399,7 +400,7 @@ func (r *Reader) rehydrateObject(ctx context.Context, path string, tier blob.Acc RehydratePriority: &priority, }) if err != nil { - return fmt.Errorf("starting rehydration: %w", err) + return fmt.Errorf("failed to set tier: %w", err) } return nil @@ -464,7 +465,7 @@ func (r *Reader) warmDirectory(ctx context.Context, path string, tier blob.Acces switch state { case objStatusArchived: if err = r.rehydrateObject(ctx, object, tier); err != nil { - return fmt.Errorf("failed to restore object: %w", err) + return fmt.Errorf("failed to rehydrate object: %w", err) } r.objectsToWarm = append(r.objectsToWarm, object) diff --git a/pkg/asinfo/client.go b/pkg/asinfo/client.go index 656657bc7..e1053b910 100644 --- a/pkg/asinfo/client.go +++ b/pkg/asinfo/client.go @@ -1313,7 +1313,7 @@ type infoMap map[string]string // each key-value pair is separated by a colon and the key is separated from the value by an equals sign // e.g. "foo=bar:baz=qux;foo=bar:baz=qux" // the above example is returned as []infoMap{infoMap{"foo": "bar", "baz": "qux"}, infoMap{"foo": "bar", "baz": "qux"}} -// if the passed in infor response is empty nil, nil is returned +// if the passed in info response is empty, nil is returned. func parseInfoResponse(resp, objSep, pairSep, kvSep string) ([]infoMap, error) { if resp == "" { return nil, nil
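
The sketch below is not part of the patch; it illustrates the two conventions the diff moves the code toward: wrapping underlying causes with %w (so callers can inspect them with errors.Is / errors.As instead of matching message text) and logging with typed slog attributes rather than loose "key", value pairs, as in config_partition_filter.go and the sindex/UDF writers. The parseFilter helper, its message text, and the sample input are hypothetical stand-ins, not code from this repository.

package main

import (
	"errors"
	"fmt"
	"log/slog"
	"os"
	"strconv"
	"strings"
)

// parseFilter is a hypothetical stand-in for ParsePartitionFilterString.
// Wrapping the cause with %w keeps the full error chain intact.
func parseFilter(filter string) (int, error) {
	begin, err := strconv.Atoi(strings.TrimSpace(filter))
	if err != nil {
		return 0, fmt.Errorf("failed to parse partition filter %q: %w", filter, err)
	}

	return begin, nil
}

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stderr, nil))

	for _, filter := range strings.Split("0, 4096, oops", ",") {
		begin, err := parseFilter(filter)
		if err != nil {
			// Because the cause was wrapped with %w, callers can still
			// recover the underlying *strconv.NumError via errors.As.
			var numErr *strconv.NumError
			logger.Error("skipping filter",
				slog.String("filter", filter),
				slog.Bool("numeric_error", errors.As(err, &numErr)),
				slog.Any("err", err))

			continue
		}

		// Typed attribute instead of a loose "key", value pair.
		logger.Info("parsed partition filter", slog.Int("begin", begin))
	}
}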
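
A second sketch, also illustrative only, shows the default "discard" logger that the added "// Discard handler." comments in the S3 and Azure readers refer to: the handler's minimum level is set far above any level that will ever be logged, so nothing is emitted and the nil writer is never touched. The reader type, option plumbing, and newReader constructor here are hypothetical simplifications of the real NewReader; newer Go releases (1.24+) also provide slog.DiscardHandler for the same purpose.

package main

import (
	"log/slog"
	"os"
)

// reader is a hypothetical stand-in for the storage readers touched by this patch.
type reader struct {
	logger *slog.Logger
}

// option mirrors the functional-options style used by the real constructors.
type option func(*reader)

// withLogger lets callers supply their own logger.
func withLogger(l *slog.Logger) option {
	return func(r *reader) { r.logger = l }
}

// newReader installs a logger that discards everything by default:
// slog.Level(1024) is above any level that will ever be logged, so the
// handler's Handle method (and its nil writer) is never invoked.
func newReader(opts ...option) *reader {
	r := &reader{
		logger: slog.New(slog.NewTextHandler(nil, &slog.HandlerOptions{Level: slog.Level(1024)})),
	}

	for _, opt := range opts {
		opt(r)
	}

	return r
}

func main() {
	// Silent by default.
	newReader().logger.Info("this is discarded")

	// Opt in to real output by passing a logger.
	stderrLogger := slog.New(slog.NewTextHandler(os.Stderr, nil))
	newReader(withLogger(stderrLogger)).logger.Info("this is written to stderr")
}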