-import requests
-import configparser
-import time
-import datetime
-import os
-import sys
+import argparse
 import csv
+import datetime
+import time
 from unittest import TestLoader
 
-# Abort if config file is not specified.
-config = os.getenv('DASHBOARD_CONFIG')
-if config is None:
-    print('You must specify a config file for the dashboard to be able to use the unit test monitoring functionality.')
-    print('Please set an environment variable \'DASHBOARD_CONFIG\' specifying the absolute path to your config file.')
-    sys.exit(0)
-
-# Abort if log directory is not specified.
-log_dir = os.getenv('DASHBOARD_LOG_DIR')
-if log_dir is None:
-    print('You must specify a log directory for the dashboard to be able to use the unit test monitoring '
-          'functionality.')
-    print('Please set an environment variable \'DASHBOARD_LOG_DIR\' specifying the absolute path where you want the '
-          'log files to be placed.')
-    sys.exit(0)
+import requests
 
-n = 1
-url = None
-sys.path.insert(0, os.getcwd())
-parser = configparser.RawConfigParser()
-try:
-    parser.read(config)
-    if parser.has_option('dashboard', 'N'):
-        n = int(parser.get('dashboard', 'N'))
-    if parser.has_option('dashboard', 'TEST_DIR'):
-        test_dir = parser.get('dashboard', 'TEST_DIR')
-    else:
-        print('No test directory specified in your config file. Please do so.')
-        sys.exit(0)
-    if parser.has_option('dashboard', 'SUBMIT_RESULTS_URL'):
-        url = parser.get('dashboard', 'SUBMIT_RESULTS_URL')
-    else:
-        print('No url specified in your config file for submitting test results. Please do so.')
-except configparser.Error as e:
-    print("Something went wrong while parsing the configuration file:\n{}".format(e))
+# Parsing the arguments.
+parser = argparse.ArgumentParser(description='Collecting performance results from the unit tests of a project.')
+parser.add_argument('--test_folder', dest='test_folder', required=True,
+                    help='folder in which the unit tests can be found (example: ./tests)')
+parser.add_argument('--times', dest='times', default=5, type=int,
+                    help='number of times to execute every unit test (default: 5)')
+parser.add_argument('--url', dest='url', default=None,
+                    help='url of the Dashboard to submit the performance results to')
+args = parser.parse_args()
+print('Starting the collection of performance results with the following settings:')
+print(' - folder containing unit tests: ', args.test_folder)
+print(' - number of times to run tests: ', args.times)
+print(' - url to submit the results to: ', args.url)
+if not args.url:
+    print('The performance results will not be submitted.')
 
+# Initialize result dictionary and logs.
 data = {'test_runs': [], 'grouped_tests': []}
-log = open(log_dir + "endpoint_hits.log", "w")
-log.write("\"time\",\"endpoint\"\n")
+log = open('endpoint_hits.log', 'w')
+log.write('"time","endpoint"\n')
 log.close()
-log = open(log_dir + "test_runs.log", "w")
-log.write("\"start_time\",\"stop_time\",\"test_name\"\n")
-
-if test_dir:
-    suites = TestLoader().discover(test_dir, pattern="*test*.py")
-    for i in range(n):
-        for suite in suites:
-            for case in suite:
-                for test in case:
-                    result = None
-                    t1 = str(datetime.datetime.now())
-                    time1 = time.time()
-                    result = test.run(result)
-                    time2 = time.time()
-                    t2 = str(datetime.datetime.now())
-                    log.write("\"{}\",\"{}\",\"{}\"\n".format(t1, t2, str(test)))
-                    t = (time2 - time1) * 1000
-                    data['test_runs'].append({'name': str(test), 'exec_time': t, 'time': str(datetime.datetime.now()),
-                                              'successful': result.wasSuccessful(), 'iter': i + 1})
+log = open('test_runs.log', 'w')
+log.write('"start_time","stop_time","test_name"\n')
 
+# Find the tests and execute them the specified number of times.
+# Add the performance results to the result dictionary.
+suites = TestLoader().discover(args.test_folder, pattern="*test*.py")
+for iteration in range(args.times):
+    for suite in suites:
+        for case in suite:
+            for test in case:
+                test_result = None
+                start_time_stamp = str(datetime.datetime.now())
+                time_before = time.time()
+                test_result = test.run(test_result)
+                time_after = time.time()
+                end_time_stamp = str(datetime.datetime.now())
+                log.write('"{}","{}","{}"\n'.format(start_time_stamp, end_time_stamp, str(test)))
+                execution_time = (time_after - time_before) * 1000
+                data['test_runs'].append(
+                    {'name': str(test), 'exec_time': execution_time, 'time': str(datetime.datetime.now()),
+                     'successful': test_result.wasSuccessful(), 'iter': iteration + 1})
 log.close()
 
-# Read and parse the log containing the test runs
-runs = []
-with open(log_dir + 'test_runs.log') as log:
+# Read and parse the log containing the test runs into an array for processing.
+test_runs = []
+with open('test_runs.log') as log:
     reader = csv.DictReader(log)
     for row in reader:
-        runs.append([datetime.datetime.strptime(row["start_time"], "%Y-%m-%d %H:%M:%S.%f"),
-                     datetime.datetime.strptime(row["stop_time"], "%Y-%m-%d %H:%M:%S.%f"),
-                     row['test_name']])
+        test_runs.append([datetime.datetime.strptime(row["start_time"], "%Y-%m-%d %H:%M:%S.%f"),
+                          datetime.datetime.strptime(row["stop_time"], "%Y-%m-%d %H:%M:%S.%f"),
+                          row['test_name']])
 
-# Read and parse the log containing the endpoint hits
-hits = []
-with open(log_dir + 'endpoint_hits.log') as log:
+# Read and parse the log containing the endpoint hits into an array for processing.
+endpoint_hits = []
+with open('endpoint_hits.log') as log:
     reader = csv.DictReader(log)
     for row in reader:
-        hits.append([datetime.datetime.strptime(row["time"], "%Y-%m-%d %H:%M:%S.%f"),
-                     row['endpoint']])
+        endpoint_hits.append([datetime.datetime.strptime(row["time"], "%Y-%m-%d %H:%M:%S.%f"),
+                              row['endpoint']])
 
-# Analyze logs to find out which endpoints are hit by which unit tests
-for h in hits:
-    for r in runs:
-        if r[0] <= h[0] <= r[1]:
-            if {'endpoint': h[1], 'test_name': r[2]} not in data['grouped_tests']:
-                data['grouped_tests'].append({'endpoint': h[1], 'test_name': r[2]})
+# Analyze the two arrays to find out which endpoints were hit by which unit tests.
+# Add the endpoint_name/test_name combination to the result dictionary.
+for endpoint_hit in endpoint_hits:
+    for test_run in test_runs:
+        if test_run[0] <= endpoint_hit[0] <= test_run[1]:
+            if {'endpoint': endpoint_hit[1], 'test_name': test_run[2]} not in data['grouped_tests']:
+                data['grouped_tests'].append({'endpoint': endpoint_hit[1], 'test_name': test_run[2]})
             break
 
-# Try to send test results and endpoint-grouped unit tests to the flask_monitoringdashboard
-if url:
+# Send test results and endpoint_name/test_name combinations to the Dashboard if specified.
+if args.url:
+    if args.url[-1] == '/':
+        args.url += 'submit-test-results'
+    else:
+        args.url += '/submit-test-results'
     try:
-        requests.post(url, json=data)
-        print('Sent unit test results to the dashboard.')
+        requests.post(args.url, json=data)
+        print('Sent unit test results to the Dashboard at ', args.url)
     except Exception as e:
        print('Sending unit test results to the dashboard failed:\n{}'.format(e))
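
With this change the collector is configured entirely from the command line instead of a config file and environment variables. As a rough usage sketch (the script file name and Dashboard address below are illustrative assumptions, not taken from this diff):

    python collect_performance.py --test_folder ./tests --times 5 --url http://localhost:5000/dashboard

Note that the script appends 'submit-test-results' to the given --url before posting, so the URL should point at the Dashboard's base route rather than at the submission endpoint itself.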