6 changes: 3 additions & 3 deletions config_partition_filter.go
@@ -254,10 +254,10 @@ func ParsePartitionFilterListString(namespace, filters string) ([]*a.PartitionFi
filterSlice := strings.Split(filters, ",")
partitionFilters := make([]*a.PartitionFilter, 0, len(filterSlice))

- for i := range filterSlice {
- partitionFilter, err := ParsePartitionFilterString(namespace, filterSlice[i])
+ for _, filter := range filterSlice {
+ partitionFilter, err := ParsePartitionFilterString(namespace, filter)
if err != nil {
return nil, fmt.Errorf("failed to parse partition filter, filter: %s, err: %v", filterSlice[i], err)
return nil, fmt.Errorf("failed to parse partition filter, filter: %s, err: %w", filter, err)
}

partitionFilters = append(partitionFilters, partitionFilter)
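A note on the %v → %w change above: wrapping with %w keeps the parse error in the error chain, so callers can still match it with errors.Is / errors.As, while %v flattens it to plain text. A minimal, self-contained sketch of the difference (ErrBadFilter and parseFilter are illustrative names, not from this repository):

```go
package main

import (
	"errors"
	"fmt"
)

// ErrBadFilter stands in for a sentinel error a parser might return.
var ErrBadFilter = errors.New("bad filter")

func parseFilter(s string) error {
	if s == "" {
		return ErrBadFilter
	}
	return nil
}

func main() {
	// %w keeps ErrBadFilter in the chain; %v would have erased it.
	err := fmt.Errorf("failed to parse partition filter, filter: %q, err: %w", "", parseFilter(""))
	fmt.Println(errors.Is(err, ErrBadFilter)) // true
}
```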
4 changes: 2 additions & 2 deletions config_restore.go
@@ -28,7 +28,7 @@ type ConfigRestore struct {
WritePolicy *a.WritePolicy
// Namespace details for the restore operation.
// By default, the data is restored to the namespace from which it was taken.
- Namespace *RestoreNamespaceConfig `json:"namespace,omitempty"`
+ Namespace *RestoreNamespaceConfig
// Encryption details.
EncryptionPolicy *EncryptionPolicy
// Compression details.
@@ -89,7 +89,7 @@ type ConfigRestore struct {
MetricsEnabled bool
// ValidateOnly indicates whether restore should only validate the backup files.
ValidateOnly bool
- // ApplyMetadataLast defines when to restore metadata (Sindexes and UDFs).
+ // ApplyMetadataLast defines when to restore metadata (secondary indexes and UDFs).
// If set to true, metadata will be restored after all records have been processed.
ApplyMetadataLast bool
// IgnoreUnknownFields indicates whether restore should ignore unknown to decoder fields.
54 changes: 27 additions & 27 deletions io/aerospike/sindex_writer.go
@@ -31,32 +31,32 @@ type sindexWriter struct {

// writeSecondaryIndex writes a secondary index to Aerospike.
func (rw sindexWriter) writeSecondaryIndex(si *models.SIndex) error {
- var sindexType a.IndexType
+ var sIndexType a.IndexType

switch si.Path.BinType {
case models.NumericSIDataType:
- sindexType = a.NUMERIC
+ sIndexType = a.NUMERIC
case models.StringSIDataType:
- sindexType = a.STRING
+ sIndexType = a.STRING
case models.BlobSIDataType:
- sindexType = a.BLOB
+ sIndexType = a.BLOB
case models.GEO2DSphereSIDataType:
- sindexType = a.GEO2DSPHERE
+ sIndexType = a.GEO2DSPHERE
default:
return fmt.Errorf("invalid sindex bin type: %c", si.Path.BinType)
}

- var sindexCollectionType a.IndexCollectionType
+ var sIndexCollectionType a.IndexCollectionType

switch si.IndexType {
case models.BinSIndex:
- sindexCollectionType = a.ICT_DEFAULT
+ sIndexCollectionType = a.ICT_DEFAULT
case models.ListElementSIndex:
- sindexCollectionType = a.ICT_LIST
+ sIndexCollectionType = a.ICT_LIST
case models.MapKeySIndex:
- sindexCollectionType = a.ICT_MAPKEYS
+ sIndexCollectionType = a.ICT_MAPKEYS
case models.MapValueSIndex:
- sindexCollectionType = a.ICT_MAPVALUES
+ sIndexCollectionType = a.ICT_MAPVALUES
default:
return fmt.Errorf("invalid sindex collection type: %c", si.IndexType)
}
@@ -84,33 +84,33 @@ func (rw sindexWriter) writeSecondaryIndex(si *models.SIndex) error {
job, aErr := rw.createIndex(
rw.writePolicy,
si,
- sindexType,
- sindexCollectionType,
+ sIndexType,
+ sIndexCollectionType,
exp,
ctx...,
)
- if err != nil {
+ if aErr != nil {
if aErr.Matches(atypes.INDEX_FOUND) {
rw.logger.Debug("index already exists, replacing it", "sindex", si.Name)
rw.logger.Debug("secondary index already exists, replacing it", "name", si.Name)

- err = rw.asc.DropIndex(rw.writePolicy, si.Namespace, si.Set, si.Name)
+ err := rw.asc.DropIndex(rw.writePolicy, si.Namespace, si.Set, si.Name)
if err != nil {
return fmt.Errorf("error dropping sindex %s: %w", si.Name, err)
}

- job, aErr = rw.createIndex(
+ job, err = rw.createIndex(
rw.writePolicy,
si,
- sindexType,
- sindexCollectionType,
+ sIndexType,
+ sIndexCollectionType,
exp,
ctx...,
)
- if aErr != nil {
+ if err != nil {
return fmt.Errorf("error creating replacement sindex %s: %w", si.Name, err)
}
} else {
return fmt.Errorf("error creating sindex %s: %w", si.Name, err)
return fmt.Errorf("error creating sindex %s: %w", si.Name, aErr)
}
}

@@ -128,16 +128,16 @@ func (rw sindexWriter) writeSecondaryIndex(si *models.SIndex) error {
return fmt.Errorf("error creating sindex %s: %w", si.Name, err)
}

rw.logger.Debug("created sindex", "sindex", si.Name)
rw.logger.Debug("created secondary index", slog.String("name", si.Name))

return nil
}

func (rw sindexWriter) createIndex(
wp *a.WritePolicy,
si *models.SIndex,
- sindexType a.IndexType,
- sindexCollectionType a.IndexCollectionType,
+ sIndexType a.IndexType,
+ sIndexCollectionType a.IndexCollectionType,
exp *a.Expression,
ctx ...*a.CDTContext,
) (*a.IndexTask, a.Error) {
@@ -147,8 +147,8 @@ func (rw sindexWriter) createIndex(
si.Namespace,
si.Set,
si.Name,
- sindexType,
- sindexCollectionType,
+ sIndexType,
+ sIndexCollectionType,
exp,
)
}
@@ -159,8 +159,8 @@ func (rw sindexWriter) createIndex(
si.Set,
si.Name,
si.Path.BinName,
- sindexType,
- sindexCollectionType,
+ sIndexType,
+ sIndexCollectionType,
ctx...,
)
}
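The logging changes in this file (and in udf_writer.go below) move from implicit key/value arguments to typed slog attributes such as slog.String("name", si.Name). A small standard-library sketch of the two styles (the message and index name here are made up for illustration):

```go
package main

import (
	"log/slog"
	"os"
)

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{
		Level: slog.LevelDebug, // make Debug records visible for this demo
	}))

	// Implicit key/value pairs: keys are bare strings, values are any.
	logger.Debug("created sindex", "sindex", "idx_users_age")

	// Typed attributes: slog.String makes the key/value pairing explicit
	// and avoids accidentally passing an odd number of arguments.
	logger.Debug("created secondary index", slog.String("name", "idx_users_age"))
}
```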
2 changes: 1 addition & 1 deletion io/aerospike/udf_writer.go
@@ -58,7 +58,7 @@ func (rw udfWriter) writeUDF(udf *models.UDF) error {
return fmt.Errorf("error registering UDF %s: %w", udf.Name, err)
}

rw.logger.Debug("registered UDF", "udf", udf.Name)
rw.logger.Debug("registered UDF", slog.String("name", udf.Name))

return nil
}
1 change: 1 addition & 0 deletions io/storage/aws/s3/reader.go
@@ -93,6 +93,7 @@ func NewReader(

// Set default val.
r.PollWarmDuration = common.DefaultPollWarmDuration
+ // Discard handler.
r.Logger = slog.New(slog.NewTextHandler(nil, &slog.HandlerOptions{Level: slog.Level(1024)}))

for _, opt := range opts {
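On the "// Discard handler." comment added above: the default logger is built with a handler level far above anything the code ever logs at, so no record is emitted unless the caller supplies a logger. A standalone sketch of the same idea (an illustration, not the repository's code; it writes to io.Discard to make the intent explicit):

```go
package main

import (
	"io"
	"log/slog"
)

// discardLogger returns a logger whose handler level is higher than any
// level the application will ever log at, so every record is dropped.
func discardLogger() *slog.Logger {
	return slog.New(slog.NewTextHandler(io.Discard, &slog.HandlerOptions{
		Level: slog.Level(1024),
	}))
}

func main() {
	log := discardLogger()
	log.Error("this is never written anywhere")
}
```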
10 changes: 4 additions & 6 deletions io/storage/aws/s3/writer.go
@@ -191,9 +191,8 @@ func (w *s3Writer) Write(p []byte) (int, error) {
}

if w.buffer.Len() >= w.chunkSize {
- err := w.uploadPart()
- if err != nil {
- return 0, fmt.Errorf("failed to upload part: %w", err)
+ if err := w.uploadPart(); err != nil {
+ return 0, err
}
}

@@ -231,9 +230,8 @@ func (w *s3Writer) Close() error {
}

if w.buffer.Len() > 0 {
- err := w.uploadPart()
- if err != nil {
- return fmt.Errorf("failed to upload part: %w", err)
+ if err := w.uploadPart(); err != nil {
+ return err
}
}

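The Write/Close changes above keep the existing buffering scheme: accumulate bytes, upload a part once the buffer reaches chunkSize, and flush the remainder on Close. The error check is now inlined and the wrapping is left to uploadPart itself. A simplified sketch of that pattern with a fake uploader (chunkedWriter and its uploadPart are illustrative, not this repository's s3Writer):

```go
package main

import (
	"bytes"
	"fmt"
)

// chunkedWriter buffers writes and "uploads" a part whenever the buffer
// reaches chunkSize; Close flushes whatever is left over.
type chunkedWriter struct {
	buffer    bytes.Buffer
	chunkSize int
	parts     int
}

func (w *chunkedWriter) uploadPart() error {
	w.parts++
	fmt.Printf("uploaded part %d (%d bytes)\n", w.parts, w.buffer.Len())
	w.buffer.Reset()
	return nil
}

func (w *chunkedWriter) Write(p []byte) (int, error) {
	n, _ := w.buffer.Write(p) // bytes.Buffer.Write never returns an error
	if w.buffer.Len() >= w.chunkSize {
		if err := w.uploadPart(); err != nil {
			return 0, err
		}
	}
	return n, nil
}

func (w *chunkedWriter) Close() error {
	if w.buffer.Len() > 0 {
		if err := w.uploadPart(); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	w := &chunkedWriter{chunkSize: 5}
	w.Write([]byte("hello world"))
	w.Close()
}
```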
5 changes: 3 additions & 2 deletions io/storage/azure/blob/reader.go
@@ -85,6 +85,7 @@ func NewReader(

// Set default val.
r.PollWarmDuration = common.DefaultPollWarmDuration
+ // Discard handler.
r.Logger = slog.New(slog.NewTextHandler(nil, &slog.HandlerOptions{Level: slog.Level(1024)}))

for _, opt := range opts {
@@ -399,7 +400,7 @@ func (r *Reader) rehydrateObject(ctx context.Context, path string, tier blob.Acc
RehydratePriority: &priority,
})
if err != nil {
return fmt.Errorf("starting rehydration: %w", err)
return fmt.Errorf("failed to set tier: %w", err)
}

return nil
@@ -464,7 +465,7 @@ func (r *Reader) warmDirectory(ctx context.Context, path string, tier blob.Acces
switch state {
case objStatusArchived:
if err = r.rehydrateObject(ctx, object, tier); err != nil {
return fmt.Errorf("failed to restore object: %w", err)
return fmt.Errorf("failed to rehydrate object: %w", err)
}

r.objectsToWarm = append(r.objectsToWarm, object)
2 changes: 1 addition & 1 deletion pkg/asinfo/client.go
@@ -1313,7 +1313,7 @@ type infoMap map[string]string
// each key-value pair is separated by a colon and the key is separated from the value by an equals sign
// e.g. "foo=bar:baz=qux;foo=bar:baz=qux"
// the above example is returned as []infoMap{infoMap{"foo": "bar", "baz": "qux"}, infoMap{"foo": "bar", "baz": "qux"}}
- // if the passed in infor response is empty nil, nil is returned
+ // if the passed in info response is empty, nil is returned.
func parseInfoResponse(resp, objSep, pairSep, kvSep string) ([]infoMap, error) {
if resp == "" {
return nil, nil
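For the comment fix above: parseInfoResponse takes a response such as "foo=bar:baz=qux;foo=bar:baz=qux", where objects, key/value pairs, and keys and values are split by three different separators, and returns nil for an empty response. A rough standalone sketch of that parsing (simplified; the real function also validates input and returns an error):

```go
package main

import (
	"fmt"
	"strings"
)

// parseInfo splits "foo=bar:baz=qux;foo=bar:baz=qux" into
// []map[string]string using the given object, pair, and key/value separators.
// It returns nil for an empty response, mirroring the documented behaviour.
func parseInfo(resp, objSep, pairSep, kvSep string) []map[string]string {
	if resp == "" {
		return nil
	}

	var out []map[string]string
	for _, obj := range strings.Split(resp, objSep) {
		m := make(map[string]string)
		for _, pair := range strings.Split(obj, pairSep) {
			if k, v, ok := strings.Cut(pair, kvSep); ok {
				m[k] = v
			}
		}
		out = append(out, m)
	}
	return out
}

func main() {
	fmt.Println(parseInfo("foo=bar:baz=qux;foo=bar:baz=qux", ";", ":", "="))
}
```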