From 729d7bb42afee55c63d2c14765b89a059c1f19c0 Mon Sep 17 00:00:00 2001
From: filipecosta90
Date: Tue, 18 Oct 2022 13:11:32 +0100
Subject: [PATCH 1/2] logging keyrange

---
 pkg/workload/core.go | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/pkg/workload/core.go b/pkg/workload/core.go
index dbb9ab03..3af41bd5 100644
--- a/pkg/workload/core.go
+++ b/pkg/workload/core.go
@@ -650,24 +650,27 @@ func (coreCreator) Create(p *properties.Properties) (ycsb.Workload, error) {
 
 	c.keySequence = generator.NewCounter(insertStart)
 	c.operationChooser = createOperationGenerator(p)
+	var keyrangeLowerBound int64 = insertStart
+	var keyrangeUpperBound int64 = insertStart+insertCount-1
 
 	c.transactionInsertKeySequence = generator.NewAcknowledgedCounter(c.recordCount)
 	switch requestDistrib {
 	case "uniform":
-		c.keyChooser = generator.NewUniform(insertStart, insertStart+insertCount-1)
+		c.keyChooser = generator.NewUniform(keyrangeLowerBound, keyrangeUpperBound)
 	case "sequential":
-		c.keyChooser = generator.NewSequential(insertStart, insertStart+insertCount-1)
+		c.keyChooser = generator.NewSequential(keyrangeLowerBound, keyrangeUpperBound)
 	case "zipfian":
 		insertProportion := p.GetFloat64(prop.InsertProportion, prop.InsertProportionDefault)
 		opCount := p.GetInt64(prop.OperationCount, 0)
 		expectedNewKeys := int64(float64(opCount) * insertProportion * 2.0)
-		c.keyChooser = generator.NewScrambledZipfian(insertStart, insertStart+insertCount+expectedNewKeys, generator.ZipfianConstant)
+		keyrangeUpperBound = insertStart + insertCount + expectedNewKeys
+		c.keyChooser = generator.NewScrambledZipfian(keyrangeLowerBound, keyrangeUpperBound, generator.ZipfianConstant)
 	case "latest":
 		c.keyChooser = generator.NewSkewedLatest(c.transactionInsertKeySequence)
 	case "hotspot":
 		hotsetFraction := p.GetFloat64(prop.HotspotDataFraction, prop.HotspotDataFractionDefault)
 		hotopnFraction := p.GetFloat64(prop.HotspotOpnFraction, prop.HotspotOpnFractionDefault)
-		c.keyChooser = generator.NewHotspot(insertStart, insertStart+insertCount-1, hotsetFraction, hotopnFraction)
+		c.keyChooser = generator.NewHotspot(keyrangeLowerBound, keyrangeUpperBound, hotsetFraction, hotopnFraction)
 	case "exponential":
 		percentile := p.GetFloat64(prop.ExponentialPercentile, prop.ExponentialPercentileDefault)
 		frac := p.GetFloat64(prop.ExponentialFrac, prop.ExponentialFracDefault)
@@ -675,6 +678,7 @@ func (coreCreator) Create(p *properties.Properties) (ycsb.Workload, error) {
 	default:
 		util.Fatalf("unknown request distribution %s", requestDistrib)
 	}
+	fmt.Printf("Using request distribution '%s' with a key range of [%d %d]\n", requestDistrib, keyrangeLowerBound, keyrangeUpperBound)
 	c.fieldChooser = generator.NewUniform(0, c.fieldCount-1)
 
 	switch scanLengthDistrib {

From 93f0f9d227dadfbdfd044f2098369ed15a901f7a Mon Sep 17 00:00:00 2001
From: filipecosta90
Date: Tue, 18 Oct 2022 15:10:35 +0100
Subject: [PATCH 2/2] tuning redis pool size when it's not specified

---
 db/redis/db.go            | 21 ++++++++---
 pkg/workload/core.go      |  7 ++--
 pkg/workload/core_test.go | 74 +++++++++++++++++++++++++++++++++++++++
 3 files changed, 92 insertions(+), 10 deletions(-)
 create mode 100644 pkg/workload/core_test.go

diff --git a/db/redis/db.go b/db/redis/db.go
index da0c0495..2aa1de60 100644
--- a/db/redis/db.go
+++ b/db/redis/db.go
@@ -264,6 +264,7 @@ const (
 	redisReadTimeout     = "redis.read_timeout"
 	redisWriteTimeout    = "redis.write_timeout"
 	redisPoolSize        = "redis.pool_size"
+	redisPoolSizeDefault = 0
 	redisMinIdleConns    = "redis.min_idle_conns"
 	redisMaxConnAge      = "redis.max_conn_age"
 	redisPoolTimeout     = "redis.pool_timeout"
@@ -292,23 +293,28 @@ func parseTLS(p *properties.Properties) *tls.Config {
 
 func getOptionsSingle(p *properties.Properties) *goredis.Options {
 	opts := &goredis.Options{}
-	opts.Network = p.GetString(redisNetwork, redisNetworkDefault)
+
 	opts.Addr = p.GetString(redisAddr, redisAddrDefault)
-	opts.Password, _ = p.Get(redisPassword)
 	opts.DB = p.GetInt(redisDB, 0)
+	opts.Network = p.GetString(redisNetwork, redisNetworkDefault)
+	opts.Password, _ = p.Get(redisPassword)
 	opts.MaxRetries = p.GetInt(redisMaxRetries, 0)
 	opts.MinRetryBackoff = p.GetDuration(redisMinRetryBackoff, time.Millisecond*8)
 	opts.MaxRetryBackoff = p.GetDuration(redisMaxRetryBackoff, time.Millisecond*512)
 	opts.DialTimeout = p.GetDuration(redisDialTimeout, time.Second*5)
 	opts.ReadTimeout = p.GetDuration(redisReadTimeout, time.Second*3)
 	opts.WriteTimeout = p.GetDuration(redisWriteTimeout, opts.ReadTimeout)
-	opts.PoolSize = p.GetInt(redisPoolSize, 10)
+	opts.PoolSize = p.GetInt(redisPoolSize, redisPoolSizeDefault)
+	threadCount := p.MustGetInt("threadcount")
+	if opts.PoolSize == 0 {
+		opts.PoolSize = threadCount
+		fmt.Printf("Setting %s=%d (from threadcount) since no value was specified.\n", redisPoolSize, opts.PoolSize)
+	}
 	opts.MinIdleConns = p.GetInt(redisMinIdleConns, 0)
 	opts.MaxConnAge = p.GetDuration(redisMaxConnAge, 0)
 	opts.PoolTimeout = p.GetDuration(redisPoolTimeout, time.Second+opts.ReadTimeout)
 	opts.IdleTimeout = p.GetDuration(redisIdleTimeout, time.Minute*5)
 	opts.IdleCheckFrequency = p.GetDuration(redisIdleCheckFreq, time.Minute)
-
 	opts.TLSConfig = parseTLS(p)
 
 	return opts
@@ -330,7 +336,12 @@ func getOptionsCluster(p *properties.Properties) *goredis.ClusterOptions {
 	opts.DialTimeout = p.GetDuration(redisDialTimeout, time.Second*5)
 	opts.ReadTimeout = p.GetDuration(redisReadTimeout, time.Second*3)
 	opts.WriteTimeout = p.GetDuration(redisWriteTimeout, opts.ReadTimeout)
-	opts.PoolSize = p.GetInt(redisPoolSize, 10)
+	opts.PoolSize = p.GetInt(redisPoolSize, redisPoolSizeDefault)
+	threadCount := p.MustGetInt("threadcount")
+	if opts.PoolSize == 0 {
+		opts.PoolSize = threadCount
+		fmt.Printf("Setting %s=%d (from threadcount) since no value was specified.\n", redisPoolSize, opts.PoolSize)
+	}
 	opts.MinIdleConns = p.GetInt(redisMinIdleConns, 0)
 	opts.MaxConnAge = p.GetDuration(redisMaxConnAge, 0)
 	opts.PoolTimeout = p.GetDuration(redisPoolTimeout, time.Second+opts.ReadTimeout)
diff --git a/pkg/workload/core.go b/pkg/workload/core.go
index 3af41bd5..25cf4584 100644
--- a/pkg/workload/core.go
+++ b/pkg/workload/core.go
@@ -408,10 +408,7 @@ func (c *core) nextKeyNum(state *coreState) int64 {
 			keyNum = c.transactionInsertKeySequence.Last() - c.keyChooser.Next(r)
 		}
 	} else {
-		keyNum = math.MaxInt64
-		for keyNum > c.transactionInsertKeySequence.Last() {
-			keyNum = c.keyChooser.Next(r)
-		}
+		keyNum = c.keyChooser.Next(r)
 	}
 	return keyNum
 }
@@ -651,7 +648,7 @@ func (coreCreator) Create(p *properties.Properties) (ycsb.Workload, error) {
 	c.keySequence = generator.NewCounter(insertStart)
 	c.operationChooser = createOperationGenerator(p)
 	var keyrangeLowerBound int64 = insertStart
-	var keyrangeUpperBound int64 = insertStart+insertCount-1
+	var keyrangeUpperBound int64 = insertStart + insertCount - 1
 
 	c.transactionInsertKeySequence = generator.NewAcknowledgedCounter(c.recordCount)
 	switch requestDistrib {
diff --git a/pkg/workload/core_test.go b/pkg/workload/core_test.go
new file mode 100644
index 00000000..ee2b3b2a
--- /dev/null
+++ b/pkg/workload/core_test.go
@@ -0,0 +1,74 @@
+package workload
+
+import (
+	"github.com/magiconair/properties"
+	"github.com/pingcap/go-ycsb/pkg/generator"
+	"github.com/pingcap/go-ycsb/pkg/ycsb"
+	"sync"
+	"testing"
+)
+
+func Test_core_nextKeyNum(t *testing.T) {
+	type fields struct {
+		p                            *properties.Properties
+		table                        string
+		fieldCount                   int64
+		fieldNames                   []string
+		fieldLengthGenerator         ycsb.Generator
+		readAllFields                bool
+		writeAllFields               bool
+		dataIntegrity                bool
+		keySequence                  ycsb.Generator
+		operationChooser             *generator.Discrete
+		keyChooser                   ycsb.Generator
+		fieldChooser                 ycsb.Generator
+		transactionInsertKeySequence *generator.AcknowledgedCounter
+		scanLength                   ycsb.Generator
+		orderedInserts               bool
+		recordCount                  int64
+		zeroPadding                  int64
+		insertionRetryLimit          int64
+		insertionRetryInterval       int64
+		valuePool                    sync.Pool
+	}
+	type args struct {
+		state *coreState
+	}
+	tests := []struct {
+		name   string
+		fields fields
+		args   args
+		want   int64
+	}{
+		// TODO: Add test cases.
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			c := &core{
+				p:                            tt.fields.p,
+				table:                        tt.fields.table,
+				fieldCount:                   tt.fields.fieldCount,
+				fieldNames:                   tt.fields.fieldNames,
+				fieldLengthGenerator:         tt.fields.fieldLengthGenerator,
+				readAllFields:                tt.fields.readAllFields,
+				writeAllFields:               tt.fields.writeAllFields,
+				dataIntegrity:                tt.fields.dataIntegrity,
+				keySequence:                  tt.fields.keySequence,
+				operationChooser:             tt.fields.operationChooser,
+				keyChooser:                   tt.fields.keyChooser,
+				fieldChooser:                 tt.fields.fieldChooser,
+				transactionInsertKeySequence: tt.fields.transactionInsertKeySequence,
+				scanLength:                   tt.fields.scanLength,
+				orderedInserts:               tt.fields.orderedInserts,
+				recordCount:                  tt.fields.recordCount,
+				zeroPadding:                  tt.fields.zeroPadding,
+				insertionRetryLimit:          tt.fields.insertionRetryLimit,
+				insertionRetryInterval:       tt.fields.insertionRetryInterval,
+				valuePool:                    tt.fields.valuePool,
+			}
+			if got := c.nextKeyNum(tt.args.state); got != tt.want {
+				t.Errorf("nextKeyNum() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
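
Reviewer note (not part of the patches above): the new core_test.go still leaves its table empty ("TODO: Add test cases."). Below is a minimal sketch of one concrete case that exercises nextKeyNum() after the simplification in PATCH 2/2, checking that a uniform key chooser only returns keys inside the [keyrangeLowerBound, keyrangeUpperBound] range that PATCH 1/2 now logs. The test name is hypothetical, and it assumes coreState exposes its per-goroutine random source as a field named r and that generator.NewUniform / generator.NewAcknowledgedCounter keep the signatures already used in pkg/workload/core.go.

package workload

import (
	"math/rand"
	"testing"

	"github.com/pingcap/go-ycsb/pkg/generator"
)

// Sketch only: the coreState field name (r) is assumed, not confirmed by these patches.
func Test_core_nextKeyNum_uniformStaysInRange(t *testing.T) {
	lower, upper := int64(0), int64(999)
	c := &core{
		keyChooser:                   generator.NewUniform(lower, upper),
		transactionInsertKeySequence: generator.NewAcknowledgedCounter(upper + 1),
		recordCount:                  upper + 1,
	}
	state := &coreState{r: rand.New(rand.NewSource(42))} // assumed field name
	for i := 0; i < 10000; i++ {
		if got := c.nextKeyNum(state); got < lower || got > upper {
			t.Fatalf("nextKeyNum() = %d, want a key in [%d, %d]", got, lower, upper)
		}
	}
}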
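
For reference, the pool-size defaulting that PATCH 2/2 adds to getOptionsSingle/getOptionsCluster can be summarised in isolation: an explicit redis.pool_size always wins, and only a missing (zero) value falls back to the thread count. The snippet below is an illustrative sketch, not code from the patch; the helper name effectivePoolSize is hypothetical, while the property names (threadcount, redis.pool_size) and the magiconair/properties calls are the ones already used in the diff.

package main

import (
	"fmt"

	"github.com/magiconair/properties"
)

// effectivePoolSize mirrors the fallback added in db/redis/db.go:
// use redis.pool_size when given, otherwise size the pool to threadcount.
func effectivePoolSize(p *properties.Properties) int {
	poolSize := p.GetInt("redis.pool_size", 0)
	if poolSize == 0 {
		poolSize = p.MustGetInt("threadcount")
	}
	return poolSize
}

func main() {
	p := properties.NewProperties()
	p.Set("threadcount", "64")
	fmt.Println(effectivePoolSize(p)) // 64: falls back to threadcount
	p.Set("redis.pool_size", "128")
	fmt.Println(effectivePoolSize(p)) // 128: explicit value wins
}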