@@ -11,21 +11,39 @@ NC='\033[0m' # No Color
# Resolve the directory this script lives in so the companion k6 .js test
# files are found regardless of the caller's working directory.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Optional first argument overrides where the collected JSON report is written.
OUTPUT_FILE="${1:-$SCRIPT_DIR/performance-data.json}"
# Each k6 test is repeated this many times; medians across runs reduce variance.
NUM_RUNS=3

echo -e "${BLUE}========================================${NC}"
echo -e "${BLUE}k6 Performance Test Collector${NC}"
echo -e "${BLUE}========================================${NC}"
echo ""
echo "Output file: $OUTPUT_FILE"
echo "Running each test ${NUM_RUNS} times to reduce variance"
echo ""
# calculate_median VALUE...
# Prints the median of the numeric arguments to stdout.
# Works for any argument count, not just 3 (the original hard-coded the
# middle index of a 3-element array). For an even count this returns the
# upper-middle element rather than averaging the two middles, which is
# sufficient here since NUM_RUNS is odd.
calculate_median() {
  local -a sorted
  # mapfile avoids unquoted word-splitting/globbing of the sort output;
  # LC_ALL=C keeps decimal-point parsing locale-independent.
  mapfile -t sorted < <(printf '%s\n' "$@" | LC_ALL=C sort -n)
  echo "${sorted[$(( ${#sorted[@]} / 2 ))]}"
}
# Run the single-fetch k6 scenario NUM_RUNS times, exporting one summary
# JSON per run for later median aggregation.
echo -e "${BLUE}Running Single Fetch Test (${NUM_RUNS} runs)...${NC}"
for i in $(seq 1 "$NUM_RUNS"); do
  echo -e "${YELLOW}  Run $i/$NUM_RUNS${NC}"
  k6 run --summary-export="/tmp/single-fetch-summary-${i}.json" "$SCRIPT_DIR/single-fetch.js"
done
# Run the dataloader k6 scenario NUM_RUNS times, exporting one summary
# JSON per run for later median aggregation.
echo -e "${BLUE}Running DataLoader Test (${NUM_RUNS} runs)...${NC}"
for i in $(seq 1 "$NUM_RUNS"); do
  echo -e "${YELLOW}  Run $i/$NUM_RUNS${NC}"
  k6 run --summary-export="/tmp/dataloader-summary-${i}.json" "$SCRIPT_DIR/dataloader.js"
done
2947
3048# Parse the summary statistics from k6 JSON output
3149echo " "
@@ -82,73 +100,139 @@ extract_metric() {
82100 fi
83101}
84102
# Extract every metric from each single-fetch run — preferring the
# phase:measurement-tagged series, falling back to the untagged one when the
# tagged value is 0 — then store the median across runs in each SINGLE_*
# variable. The spec table replaces ten near-identical copy-pasted
# extraction stanzas.
echo -e "${YELLOW}Calculating median values from ${NUM_RUNS} runs...${NC}"

# metric|stat|target-variable triplets
SINGLE_METRIC_SPECS=(
  "http_req_duration|min|SINGLE_MIN"
  "http_req_duration|p(50)|SINGLE_P50"
  "http_req_duration|max|SINGLE_MAX"
  "http_req_duration|avg|SINGLE_AVG"
  "http_req_duration|p(90)|SINGLE_P90"
  "http_req_duration|p(95)|SINGLE_P95"
  "http_req_duration|p(99)|SINGLE_P99"
  "http_reqs|rate|SINGLE_RPS"
  "http_req_failed|rate|SINGLE_ERROR_RATE"
  "iterations|count|SINGLE_ITERATIONS"
)

for spec in "${SINGLE_METRIC_SPECS[@]}"; do
  IFS='|' read -r metric stat var <<< "$spec"
  values=()
  for i in $(seq 1 "$NUM_RUNS"); do
    file="/tmp/single-fetch-summary-${i}.json"
    val=$(extract_metric "$file" "${metric}{phase:measurement}" "$stat")
    # 0 means the tagged series was absent; retry without the tag.
    [ "$val" == "0" ] && val=$(extract_metric "$file" "$metric" "$stat")
    values+=("$val")
  done
  # printf -v assigns the named SINGLE_* variable without eval.
  printf -v "$var" '%s' "$(calculate_median "${values[@]}")"
done
167+
# Extract every metric from each dataloader run — preferring the
# phase:measurement-tagged series, falling back to the untagged one when the
# tagged value is 0 — then store the median across runs in each DATALOADER_*
# variable. The spec table replaces ten near-identical copy-pasted
# extraction stanzas.

# metric|stat|target-variable triplets
DATALOADER_METRIC_SPECS=(
  "http_req_duration|min|DATALOADER_MIN"
  "http_req_duration|p(50)|DATALOADER_P50"
  "http_req_duration|max|DATALOADER_MAX"
  "http_req_duration|avg|DATALOADER_AVG"
  "http_req_duration|p(90)|DATALOADER_P90"
  "http_req_duration|p(95)|DATALOADER_P95"
  "http_req_duration|p(99)|DATALOADER_P99"
  "http_reqs|rate|DATALOADER_RPS"
  "http_req_failed|rate|DATALOADER_ERROR_RATE"
  "iterations|count|DATALOADER_ITERATIONS"
)

for spec in "${DATALOADER_METRIC_SPECS[@]}"; do
  IFS='|' read -r metric stat var <<< "$spec"
  values=()
  for i in $(seq 1 "$NUM_RUNS"); do
    file="/tmp/dataloader-summary-${i}.json"
    val=$(extract_metric "$file" "${metric}{phase:measurement}" "$stat")
    # 0 means the tagged series was absent; retry without the tag.
    [ "$val" == "0" ] && val=$(extract_metric "$file" "$metric" "$stat")
    values+=("$val")
  done
  # printf -v assigns the named DATALOADER_* variable without eval.
  printf -v "$var" '%s' "$(calculate_median "${values[@]}")"
done
147229
148230# Create JSON output
149231cat > " $OUTPUT_FILE " << EOF
150232{
151233 "timestamp": "$( date -u +" %Y-%m-%dT%H:%M:%SZ" ) ",
234+ "num_runs": ${NUM_RUNS} ,
235+ "note": "All metrics are median values from ${NUM_RUNS} test runs",
152236 "tests": {
153237 "single-fetch": {
154238 "name": "Single Fetch (50 products, names only)",
@@ -196,7 +280,11 @@ echo -e "${GREEN}✓${NC} Performance data written to $OUTPUT_FILE"
# Echo the final JSON so CI logs capture the collected data.
cat "$OUTPUT_FILE"

# Remove the per-run k6 summary files now that medians are computed.
for i in $(seq 1 "$NUM_RUNS"); do
  rm -f "/tmp/single-fetch-summary-${i}.json" "/tmp/dataloader-summary-${i}.json"
done
200287
# Final status messages; %b expands the ANSI color escapes like `echo -e`.
printf '\n'
printf '%b\n' "${GREEN}Performance test collection complete!${NC}"
printf '%b\n' "${YELLOW}Note: All metrics are median values from ${NUM_RUNS} test runs${NC}"
0 commit comments