@@ -154,7 +154,7 @@ async def test_dbfilenames(
     {
         **BASIC_ARGS,
         "proactor_threads": 4,
-        "dbfilename": "test-redis-load-rdb",
+        "dbfilename": "test-redis-load-rdb",
     }
 )
 async def test_redis_load_snapshot(
@@ -402,13 +402,13 @@ async def test_memory_counters(self, async_client: aioredis.Redis):
         assert memory_counters == {"object_used_memory": 0}
 
     @pytest.mark.asyncio
-    @pytest.mark.slow
     async def test_snapshot(self, df_server, async_client):
         """Checks that:
         1. After reloading the snapshot file the data is the same
-        2. Memory counters after loading from snapshot is similar to before creating a snapshot
+        2. Memory counters after loading should be non zero
         3. Memory counters after deleting all keys loaded by snapshot - this validates the memory
         counting when loading from snapshot."""
+
         seeder = StaticSeeder(**self.SEEDER_ARGS)
         await seeder.run(async_client)
         start_capture = await StaticSeeder.capture(async_client)
@@ -426,10 +426,8 @@ async def test_snapshot(self, df_server, async_client):
 
         memory_after = await self._get_info_memory_fields(async_client)
         for counter, value in memory_before.items():
-            # Unfortunately memory usage sometimes depends on order of insertion / deletion, so
-            # it's usually not exactly the same. For the test to be stable we check that it's
-            # at least 50% that of the original value.
-            assert memory_after[counter] >= 0.5 * value
+            # Counters should be non zero.
+            assert memory_after[counter] > 0
 
         await self._delete_all_keys(async_client)
         memory_empty = await self._get_info_memory_fields(async_client)
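
For reference, a minimal sketch of a helper in the spirit of _get_info_memory_fields, i.e. the kind of mapping the assertions above iterate over. The field names and the filtering below are assumptions for illustration, not the repository's actual implementation:

import redis.asyncio as aioredis

# Hypothetical helper modeled on _get_info_memory_fields from the test above.
# Which counters Dragonfly reports is an assumption here; the test only relies
# on fields such as "object_used_memory" being present in INFO MEMORY.
async def get_info_memory_fields(client: aioredis.Redis) -> dict:
    info = await client.info("memory")  # redis-py parses the INFO reply into a dict
    return {
        key: int(value)
        for key, value in info.items()
        if key == "object_used_memory" or key.startswith("type_used_memory")
    }

Against counters like these, the new assert memory_after[counter] > 0 only requires that the loader attributes some memory to the restored objects, whereas the previous >= 0.5 * value check was sensitive to allocator variation across insertion/deletion order, as the removed comment notes.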