Skip to content

Mark failing tests with pytest.xfail #49

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
Jun 7, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -101,6 +101,7 @@ packages = ["elasticsearch_serverless"]
# Pytest configuration.
# NOTE(review): pytest documents its TOML config table as
# [tool.pytest.ini_options], not [tool.pytest] — confirm this section
# is actually being picked up by the test runner.
[tool.pytest]
junit_family = "legacy"
addopts = "-vvv -p no:logging --cov-report=term-missing --cov=elasticsearch_serverless --cov-config=.pyproject.toml"
# With xfail_strict, a test marked xfail that unexpectedly PASSES is
# reported as a failure — keeps the FAILING_TESTS list honest.
xfail_strict=true

[tool.isort]
profile = "black"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -249,7 +249,7 @@ def async_runner(async_client):

@pytest.mark.parametrize("test_spec", YAML_TEST_SPECS)
async def test_rest_api_spec(test_spec, async_runner):
    """Run one YAML REST API test spec against the async client.

    Specs flagged with ``fail`` (i.e. listed in ``FAILING_TESTS``) are
    marked xfail; with ``xfail_strict`` an unexpected pass is reported
    as a failure.
    """
    # NOTE: the scraped diff also showed the pre-merge skip/SKIP_TESTS
    # branch; only the merged xfail path is kept here.
    if test_spec.get("fail", False):
        pytest.xfail("Manually marked as failing in 'FAILING_TESTS'")
    async_runner.use_spec(test_spec)
    await async_runner.run()
104 changes: 40 additions & 64 deletions test_elasticsearch_serverless/test_server/test_rest_api_spec.py
Original file line number Diff line number Diff line change
Expand Up @@ -76,61 +76,37 @@
}

# broken YAML tests on some releases
# Test identifiers ("<suite>/<file>" or "<suite>/<file>[n]") that are
# expected to fail; matching specs get a "fail" flag and are xfail'ed
# by test_rest_api_spec. (Replaces the former SKIP_TESTS set, which is
# why the scraped diff showed two set literals here.)
FAILING_TESTS = {
    # ping has a custom implementation in Python and returns a boolean
    "ping/ping",
    # TODO: bulk call in setup fails due to "malformed action/metadata line"
    # bulk body is being sent as a Buffer, unsure if related.
    "transform/10_basic",
    # TODO: wait_for_active_shards and rollover with conditions are not supported on serverless
    # see https://github.com/elastic/elasticsearch-clients-tests/issues/55
    "indices/rollover",
    # TODO: test runner needs to support ignoring 410 errors
    "indices/data_lifecycle",
    # TODO: test runner needs to support ignoring 410 errors
    "enrich/10_basic",
    # TODO: parameter `enabled` is not allowed in source
    # Same underlying problem as https://github.com/elastic/elasticsearch-clients-tests/issues/55
    "cluster/component_templates",
    # TODO: expecting `ct_field` field mapping to be returned, but instead only finds `field`
    "indices/simulate_template",
    # Fixed by https://github.com/elastic/elasticsearch-clients-tests/pull/56
    "cat/aliases",
    "cat/component_templates",
    "cat/count",
    "cat/help",
    "cat/indices",
    "cat/ml",
    "cat/transform",
    # TODO: Not investigated yet
    "indices/settings",
    "logstash/10_basic",
    "scroll/10_basic",
    "security/10_api_key_basic",
}


Expand Down Expand Up @@ -578,11 +554,11 @@ def remove_implicit_resolver(cls, tag_to_remove):
# Now we combine setup, teardown, and test_steps into
# a set of pytest.param() instances
for test_number, test_step in test_numbers_and_steps:
# Build the id from the name of the YAML file and
# the number within that file. Most important step
# is to remove most of the file path prefixes and
# the .yml suffix.
pytest_test_name = yaml_file.rpartition(".")[0].replace(".", "/")
# Build the id from the name of the YAML file and the number within
# that file. Most important step is to remove most of the file path
# prefixes and the .yml suffix.
test_path = "/".join(yaml_file.split("/")[2:])
pytest_test_name = test_path.rpartition(".")[0].replace(".", "/")
for prefix in ("rest-api-spec/", "test/", "free/", "platinum/"):
if pytest_test_name.startswith(prefix):
pytest_test_name = pytest_test_name[len(prefix) :]
Expand All @@ -594,8 +570,8 @@ def remove_implicit_resolver(cls, tag_to_remove):
"teardown": teardown_steps,
}
# Skip either 'test_name' or 'test_name[x]'
if pytest_test_name in SKIP_TESTS or pytest_param_id in SKIP_TESTS:
pytest_param["skip"] = True
if pytest_test_name in FAILING_TESTS or pytest_param_id in FAILING_TESTS:
pytest_param["fail"] = True

YAML_TEST_SPECS.append(pytest.param(pytest_param, id=pytest_param_id))

Expand All @@ -615,7 +591,7 @@ def _pytest_param_sort_key(param: pytest.param) -> Tuple[Union[str, int], ...]:

@pytest.mark.parametrize("test_spec", YAML_TEST_SPECS)
def test_rest_api_spec(test_spec, sync_runner):
    """Run one YAML REST API test spec against the sync client.

    Specs flagged with ``fail`` (i.e. listed in ``FAILING_TESTS``) are
    marked xfail; with ``xfail_strict`` an unexpected pass is reported
    as a failure.
    """
    # NOTE: the scraped diff also showed the pre-merge skip/SKIP_TESTS
    # branch; only the merged xfail path is kept here.
    if test_spec.get("fail", False):
        pytest.xfail("Manually marked as failing in 'FAILING_TESTS'")
    sync_runner.use_spec(test_spec)
    sync_runner.run()
Loading