diff --git a/cluster_kwargs.yaml b/cluster_kwargs.yaml index df25870680..c311db888c 100644 --- a/cluster_kwargs.yaml +++ b/cluster_kwargs.yaml @@ -14,7 +14,7 @@ default: package_sync: true wait_for_workers: true - scheduler_vm_types: [m6i.large] + scheduler_vm_types: [m7a.large] backend_options: spot: true spot_on_demand_fallback: true @@ -23,35 +23,35 @@ default: # For all tests using the small_client fixture small_cluster: n_workers: 10 - worker_vm_types: [m6i.large] # 2CPU, 8GiB + worker_vm_types: [m7a.large] # 2 CPU, 8 GiB # For tests/benchmarks/test_parquet.py parquet_cluster: n_workers: 15 - worker_vm_types: [m5.xlarge] # 4 CPU, 16 GiB + worker_vm_types: [m7a.xlarge] # 4 CPU, 16 GiB # For tests/benchmarks/test_spill.py spill_cluster: n_workers: 5 worker_disk_size: 64 - worker_vm_types: [m6i.large] # 2CPU, 8GiB + worker_vm_types: [m7a.large] # 2 CPU, 8 GiB # For tests/workflows/test_embarrassingly_parallel.py embarrassingly_parallel: n_workers: 100 - worker_vm_types: [m6i.xlarge] # 4 CPU, 16 GiB (preferred default instance) + worker_vm_types: [m7a.xlarge] # 4 CPU, 16 GiB (preferred default instance) backend_options: region: "us-east-1" # Same region as dataset # For tests/workflows/test_xgboost_optuna.py xgboost_optuna: n_workers: 50 - worker_vm_types: [m6i.xlarge] # 4 CPU, 16 GiB (preferred default instance) + worker_vm_types: [m7a.xlarge] # 4 CPU, 16 GiB (preferred default instance) # For tests/workflows/test_uber_lyft.py uber_lyft: n_workers: 20 - worker_vm_types: [m6i.xlarge] # 4 CPU, 16 GiB (preferred default instance) + worker_vm_types: [m7a.xlarge] # 4 CPU, 16 GiB (preferred default instance) # For tests/workflows/test_pytorch_optuna.py pytorch_optuna: @@ -66,7 +66,7 @@ pytorch_optuna: # For tests/workflows/test_snowflake.py snowflake: n_workers: 20 - worker_vm_types: [m6i.xlarge] # 4 CPU, 16 GiB (preferred default instance) + worker_vm_types: [m7a.xlarge] # 4 CPU, 16 GiB (preferred default instance) # Specific tests @@ -80,11 +80,11 @@ 
test_work_stealing_on_straggling_worker: test_repeated_merge_spill: n_workers: 20 - worker_vm_types: [m6i.large] + worker_vm_types: [m7a.large] # For tests/workflows/test_from_csv_to_parquet.py from_csv_to_parquet: n_workers: 10 - worker_vm_types: [m6i.xlarge] # 4 CPU, 16 GiB (preferred default instance) + worker_vm_types: [m7a.xlarge] # 4 CPU, 16 GiB (preferred default instance) backend_options: region: "us-east-1" # Same region as dataset