Commit 65e265c

Fixed execute_init_commands bug on L446 (#368)
Parent: fb8b95c

17 files changed: +418 −195 lines

poetry.lock

Lines changed: 47 additions & 134 deletions
(Generated file; diff not rendered.)

pyproject.toml

Lines changed: 3 additions & 3 deletions
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "redisbench-admin"
-version = "0.9.9"
+version = "0.9.12"
 description = "Redis benchmark run helper. A wrapper around Redis and Redis Modules benchmark tools ( ftsb_redisearch, memtier_benchmark, redis-benchmark, aibench, etc... )."
 authors = ["filipecosta90 <filipecosta.90@gmail.com>","Redis Performance Group <performance@redis.com>"]
 readme = "README.md"
@@ -10,7 +10,7 @@ redisbench-admin = "redisbench_admin.cli:main"
 perf-daemon = "redisbench_admin.profilers.daemon:main"

 [tool.poetry.dependencies]
-python = "^3.6.1"
+python = "^3.6.9"
 humanize = "^2.4.0"
 requests = "^2.23.0"
 py_cpuinfo = "^5.0.0"
@@ -22,7 +22,7 @@ jsonpath_ng = "^1.5.2"
 pysftp = "^0.2.9"
 python_terraform = "^0.10.1"
 GitPython = "^3.1.12"
-PyYAML = "^5.4"
+PyYAML = "^6.0"
 wget = "^3.2"
 pytablewriter = {extras = ["html"], version = "^0.64.1"}
 sshtunnel = "^0.4.0"

redisbench_admin/export/args.py

Lines changed: 1 addition & 1 deletion
@@ -64,7 +64,7 @@ def create_export_arguments(parser):
         type=str,
         default="json",
         help="results format of the the benchmark results files to read "
-        "results from ( either pyperf-json, csv, json, redis-benchmark-txt )",
+        "results from ( either google.benchmark, pyperf-json, csv, json, redis-benchmark-txt )",
     )
     parser.add_argument(
         "--use-result",

redisbench_admin/export/export.py

Lines changed: 28 additions & 6 deletions
@@ -12,6 +12,9 @@


 from redisbench_admin.export.common.common import split_tags_string
+from redisbench_admin.export.google_benchmark.google_benchmark_json_format import (
+    generate_summary_json_google_benchmark,
+)
 from redisbench_admin.export.pyperf.pyperf_json_format import (
     generate_summary_json_pyperf,
 )
@@ -50,10 +53,13 @@ def export_command_logic(args, project_name, project_version):
             "You need to specify at least one (or more) of --deployment-version --github_branch arguments"
         )
         exit(1)
-    if results_format != "csv" and results_format != "pyperf-json":
+    non_required_spec = ["csv", "pyperf-json", "google.benchmark"]
+    if results_format not in non_required_spec:
         if exporter_spec_file is None:
             logging.error(
-                "--exporter-spec-file is required for all formats with exception of csv and pyperf-json"
+                "--exporter-spec-file is required for all formats with exception of {}".format(
+                    ",".join(non_required_spec)
+                )
             )
             exit(1)
     else:
@@ -76,6 +82,22 @@ def export_command_logic(args, project_name, project_version):
         with open(benchmark_file, "r") as json_file:
             start_dict = json.load(json_file)
             results_dict = generate_summary_json_pyperf(start_dict)
+    if results_format == "google.benchmark":
+        with open(benchmark_file, "r") as json_file:
+            # override test names
+            print_warning = False
+            old_test_name = test_name
+            if test_name is None:
+                print_warning = True
+            start_dict = json.load(json_file)
+            results_dict, test_name = generate_summary_json_google_benchmark(start_dict)
+            if print_warning is True:
+                logging.warning(
+                    "You've specificied a test name {} but on google benchmark we override it based on the test names retrieved from out file {}".format(
+                        old_test_name, test_name
+                    )
+                )
+
     if args.override_test_time:
         datapoints_timestamp = int(args.override_test_time.timestamp() * 1000.0)
         logging.info(
@@ -120,9 +142,9 @@ def export_command_logic(args, project_name, project_version):
         triggering_env,
     )
     logging.info("Parsed a total of {} metrics".format(len(timeseries_dict.keys())))
-    if results_format == "pyperf-json":
-        logging.info("Parsing pyperf format into timeseries format")
-        timeseries_dict = export_pyperf_json_to_timeseries_dict(
+    if results_format == "pyperf-json" or results_format == "google.benchmark":
+        logging.info("Parsing {} format into timeseries format".format(results_format))
+        timeseries_dict = export_json_to_timeseries_dict(
             results_dict,
             break_by_dict,
             datapoints_timestamp,
@@ -181,7 +203,7 @@ def export_command_logic(args, project_name, project_version):
     )


-def export_pyperf_json_to_timeseries_dict(
+def export_json_to_timeseries_dict(
     benchmark_file,
     break_by_dict,
     datapoints_timestamp,
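
To make the new control flow easier to follow, here is a condensed sketch of the results_format dispatch that export_command_logic now performs. The load_results helper below is hypothetical (it does not exist in the repo); the imports and generator functions are the ones shown in the diff above.

import json

from redisbench_admin.export.google_benchmark.google_benchmark_json_format import (
    generate_summary_json_google_benchmark,
)
from redisbench_admin.export.pyperf.pyperf_json_format import generate_summary_json_pyperf


def load_results(results_format, benchmark_file, test_name):
    # Hypothetical helper, for illustration only: condenses the per-format
    # branches of export_command_logic shown above.
    with open(benchmark_file, "r") as json_file:
        start_dict = json.load(json_file)
    if results_format == "pyperf-json":
        return generate_summary_json_pyperf(start_dict), test_name
    if results_format == "google.benchmark":
        # google.benchmark derives test names from the result file itself,
        # overriding any test name passed on the command line.
        return generate_summary_json_google_benchmark(start_dict)
    # other formats are handled elsewhere in export_command_logic
    return start_dict, test_name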

redisbench_admin/export/google_benchmark/__init__.py (new file; filename presumed from the new package imported in export.py above)

Lines changed: 5 additions & 0 deletions

@@ -0,0 +1,5 @@
+# Apache License Version 2.0
+#
+# Copyright (c) 2021., Redis Labs Modules
+# All rights reserved.
+#

redisbench_admin/export/google_benchmark/google_benchmark_json_format.py (new file; path taken from the import added in export.py above)

Lines changed: 53 additions & 0 deletions

@@ -0,0 +1,53 @@
+# Apache License Version 2.0
+#
+# Copyright (c) 2021., Redis Labs Modules
+# All rights reserved.
+#
+import logging
+
+import numpy as np
+
+
+def metric_safe_name(row, replace_by="_"):
+    import re
+
+    metric_name = row.strip()
+    metric_name = re.sub(r"\W+", replace_by, metric_name)
+    return metric_name
+
+
+def generate_summary_json_google_benchmark(input_json):
+    result_json = {}
+    test_names = []
+    for benchmark in input_json["benchmarks"]:
+        original_name = benchmark["name"]
+        benchmark_name = original_name
+        non_safe_count = len(original_name) - len(metric_safe_name(original_name, ""))
+        if non_safe_count > 0:
+            benchmark_name = metric_safe_name(original_name)
+            while "_" == benchmark_name[len(benchmark_name) - 1]:
+                benchmark_name = benchmark_name[: len(benchmark_name) - 1]
+            logging.warning(
+                "Given the benchmark name {} contains {} non alphanumeric characters, we're replacing it by the safe version {}".format(
+                    original_name, "-", benchmark_name
+                )
+            )
+        metrics = {}
+        test_names.append(benchmark_name)
+        for metric_name, metric_value_str in benchmark.items():
+            metric_value = None
+            try:
+                metric_value = float(metric_value_str)
+            except ValueError:
+                pass
+            if metric_value is not None:
+                logging.info(
+                    "Adding google.benchmark to benchmark {} metric named {}={}".format(
+                        benchmark_name, metric_name, metric_value
+                    )
+                )
+                metrics[metric_name] = metric_value
+
+        result_json[benchmark_name] = metrics
+
+    return result_json, test_names
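
To see what the new parser produces, here is a minimal worked example. The sample input below is made up for illustration (it only mimics Google Benchmark's JSON layout); the import path is the one used in export.py above.

from redisbench_admin.export.google_benchmark.google_benchmark_json_format import (
    generate_summary_json_google_benchmark,
)

# Hypothetical Google Benchmark result with a single benchmark entry.
sample = {
    "benchmarks": [
        {
            "name": "BM_SetGet/10",  # "/" is not metric-safe, so it is replaced by "_"
            "iterations": 1000,
            "real_time": 1.25,
            "cpu_time": 1.20,
            "time_unit": "ms",  # not convertible to float, so it is skipped
        }
    ]
}

result_json, test_names = generate_summary_json_google_benchmark(sample)
# test_names == ["BM_SetGet_10"]
# result_json == {"BM_SetGet_10": {"iterations": 1000.0, "real_time": 1.25, "cpu_time": 1.2}}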

redisbench_admin/run/args.py

Lines changed: 1 addition & 1 deletion
@@ -63,7 +63,7 @@ def common_run_args(parser):
     parser.add_argument(
         "--allowed-tools",
         type=str,
-        default="memtier_benchmark,redis-benchmark,redisgraph-benchmark-go,ycsb,"
+        default="memtier_benchmark,redis-benchmark,redisgraph-benchmark-go,ycsb,go-ycsb,"
         + "tsbs_run_queries_redistimeseries,tsbs_load_redistimeseries,"
         + "ftsb_redisearch,"
         + "aibench_run_inference_redisai_vision,ann-benchmarks",

redisbench_admin/run/common.py

Lines changed: 34 additions & 4 deletions
@@ -33,7 +33,10 @@
 from redisbench_admin.run.tsbs_run_queries_redistimeseries.tsbs_run_queries_redistimeseries import (
     prepare_tsbs_benchmark_command,
 )
-from redisbench_admin.run.ycsb.ycsb import prepare_ycsb_benchmark_command
+from redisbench_admin.run.ycsb.ycsb import (
+    prepare_ycsb_benchmark_command,
+    prepare_go_ycsb_benchmark_command,
+)
 from redisbench_admin.run_remote.remote_helpers import (
     extract_module_semver_from_info_modules_cmd,
 )
@@ -168,7 +171,7 @@ def prepare_benchmark_parameters_specif_tooling(
             remote_results_file,
             isremote,
         )
-    if "ycsb" in benchmark_tool:
+    if "ycsb" in benchmark_tool and "go-ycsb" not in benchmark_tool:
         if isremote is True:
             benchmark_tool = "/tmp/ycsb/bin/ycsb"
             current_workdir = "/tmp/ycsb"
@@ -179,6 +182,18 @@ def prepare_benchmark_parameters_specif_tooling(
             entry,
             current_workdir,
         )
+    if "go-ycsb" in benchmark_tool:
+        if isremote is True:
+            benchmark_tool = "/tmp/{}".format(benchmark_tool)
+        command_arr, command_str = prepare_go_ycsb_benchmark_command(
+            benchmark_tool,
+            server_private_ip,
+            server_plaintext_port,
+            entry,
+            current_workdir,
+            cluster_api_enabled,
+        )
+
     if "tsbs_" in benchmark_tool:
         input_data_file = None
         if isremote is True:
@@ -417,10 +432,25 @@ def execute_init_commands(benchmark_config, r, dbconfig_keyname="dbconfig"):
             is_array = True
         try:
             logging.info("Sending init command: {}".format(cmd))
+            stdout = ""
             if is_array:
-                stdout = r.execute_command(*cmd)
+                if "FT.CREATE" in cmd[0]:
+                    logging.info("Detected FT.CREATE to all nodes on OSS Cluster")
+                    try:
+                        stdout = r.execute_command(*cmd, target_nodes="all")
+                    except redis.exceptions.ResponseError:
+                        pass
+                else:
+                    stdout = r.execute_command(*cmd)
             else:
-                stdout = r.execute_command(cmd)
+                if "FT.CREATE" in cmd:
+                    logging.info("Detected FT.CREATE to all nodes on OSS Cluster")
+                    try:
+                        stdout = r.execute_command(cmd, target_nodes="all")
+                    except redis.exceptions.ResponseError:
+                        pass
+                else:
+                    stdout = r.execute_command(cmd)
             logging.info("Command reply: {}".format(stdout))
         except redis.connection.ConnectionError as e:
             logging.error(
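
The fix named in the commit title lives in the execute_init_commands hunk above: on an OSS Cluster, an FT.CREATE init command is now broadcast to every node via target_nodes="all", and a ResponseError (for example "Index already exists" replies from some nodes) is swallowed. Note also that the plain ycsb branch now excludes go-ycsb, since "ycsb" is a substring of "go-ycsb". A standalone sketch of the FT.CREATE pattern, assuming a cluster client whose execute_command accepts target_nodes the way it is called in the diff:

import logging

import redis


def run_init_command(r, cmd):
    # Sketch only: mirrors the is_array branch added to execute_init_commands,
    # where cmd is a list such as ["FT.CREATE", "idx", "SCHEMA", "field", "TEXT"].
    stdout = ""
    if "FT.CREATE" in cmd[0]:
        logging.info("Detected FT.CREATE to all nodes on OSS Cluster")
        try:
            # Broadcast to every cluster node; ignore errors such as
            # "Index already exists" coming back from some of them.
            stdout = r.execute_command(*cmd, target_nodes="all")
        except redis.exceptions.ResponseError:
            pass
    else:
        stdout = r.execute_command(*cmd)
    return stdout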

redisbench_admin/run/redistimeseries.py

Lines changed: 59 additions & 19 deletions
@@ -315,25 +315,65 @@ def timeseries_test_sucess_flow(
                 rts.hset(
                     branch_target_table_keyname, None, None, branch_target_table_dict
                 )
-
-    update_secondary_result_keys(
-        artifact_version,
-        benchmark_duration_seconds,
-        build_variant_name,
-        dataset_load_duration_seconds,
-        deployment_name,
-        deployment_type,
-        metadata_tags,
-        rts,
-        running_platform,
-        start_time_ms,
-        test_name,
-        testcase_metric_context_paths,
-        tf_github_branch,
-        tf_github_org,
-        tf_github_repo,
-        tf_triggering_env,
-    )
+    if test_name is not None:
+        if type(test_name) is str:
+            update_secondary_result_keys(
+                artifact_version,
+                benchmark_duration_seconds,
+                build_variant_name,
+                dataset_load_duration_seconds,
+                deployment_name,
+                deployment_type,
+                metadata_tags,
+                rts,
+                running_platform,
+                start_time_ms,
+                test_name,
+                testcase_metric_context_paths,
+                tf_github_branch,
+                tf_github_org,
+                tf_github_repo,
+                tf_triggering_env,
+            )
+        if type(test_name) is list:
+            for inner_test_name in test_name:
+                update_secondary_result_keys(
+                    artifact_version,
+                    benchmark_duration_seconds,
+                    build_variant_name,
+                    dataset_load_duration_seconds,
+                    deployment_name,
+                    deployment_type,
+                    metadata_tags,
+                    rts,
+                    running_platform,
+                    start_time_ms,
+                    inner_test_name,
+                    testcase_metric_context_paths,
+                    tf_github_branch,
+                    tf_github_org,
+                    tf_github_repo,
+                    tf_triggering_env,
+                )
+    else:
+        update_secondary_result_keys(
+            artifact_version,
+            benchmark_duration_seconds,
+            build_variant_name,
+            dataset_load_duration_seconds,
+            deployment_name,
+            deployment_type,
+            metadata_tags,
+            rts,
+            running_platform,
+            start_time_ms,
+            test_name,
+            testcase_metric_context_paths,
+            tf_github_branch,
+            tf_github_org,
+            tf_github_repo,
+            tf_triggering_env,
+        )
     return version_target_tables, branch_target_tables

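timeseries_test_sucess_flow now accepts test_name either as a single string or as a list of names (the google.benchmark exporter returns one name per benchmark), and keeps the old unconditional call when it is None. The three branches above reduce to the following hypothetical helper, where update_fn stands in for update_secondary_result_keys with all of its other arguments already bound:

def update_for_each_test(test_name, update_fn):
    # Condensed, illustrative view of the branching added above; not part of the commit.
    if test_name is None:
        update_fn(test_name)  # previous behaviour, preserved for the None case
    elif isinstance(test_name, str):
        update_fn(test_name)
    elif isinstance(test_name, list):
        for inner_test_name in test_name:
            update_fn(inner_test_name)
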
redisbench_admin/run/tsbs_run_queries_redistimeseries/tsbs_run_queries_redistimeseries.py

Lines changed: 6 additions & 2 deletions
@@ -52,17 +52,21 @@ def prepare_tsbs_benchmark_command(
     return command_arr, command_str


-def extract_tsbs_extra_links(
+def extract_remote_tool_extra_links(
     benchmark_config,
     benchmark_tool,
     config_key="clientconfig",
     os_str="linux",
     arch_str="amd64",
+    project="redistimeseries",
+    tools_group="tsbs",
 ):
     remote_tool_link = "/tmp/{}".format(benchmark_tool)
     tool_link = (
         "https://s3.amazonaws.com/benchmarks.redislabs/"
-        + "redistimeseries/tools/tsbs/{}_{}_{}".format(benchmark_tool, os_str, arch_str)
+        + "{}/tools/{}/{}_{}_{}".format(
+            project, tools_group, benchmark_tool, os_str, arch_str
+        )
     )
     queries_file_link = None
     for entry in benchmark_config[config_key]:
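
The helper that used to be TSBS-specific now takes project and tools_group parameters, so other tool families can be downloaded from the same S3 bucket layout; with the defaults the resulting link is identical to the old hard-coded one. A sketch reproducing only the URL construction:

def remote_tool_download_link(
    benchmark_tool,
    project="redistimeseries",
    tools_group="tsbs",
    os_str="linux",
    arch_str="amd64",
):
    # Illustration only: mirrors the tool_link expression in
    # extract_remote_tool_extra_links above.
    return "https://s3.amazonaws.com/benchmarks.redislabs/" + "{}/tools/{}/{}_{}_{}".format(
        project, tools_group, benchmark_tool, os_str, arch_str
    )


# e.g. remote_tool_download_link("tsbs_run_queries_redistimeseries") ->
# "https://s3.amazonaws.com/benchmarks.redislabs/redistimeseries/tools/tsbs/tsbs_run_queries_redistimeseries_linux_amd64"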
